Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile properly so that it does not break
the x64 build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/DEPS b/src/DEPS
index b54cd04..b0b703b 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -12,18 +12,12 @@
   "+src/interpreter/bytecode-array-iterator.h",
   "+src/interpreter/bytecodes.h",
   "+src/interpreter/interpreter.h",
+  "+src/interpreter/source-position-table.h",
   "-src/libplatform",
   "-include/libplatform"
 ]
 
 specific_include_rules = {
-  ".*\.h": [
-    # Note that src/v8.h by now is a regular header file, it doesn't provide
-    # any special declarations besides the V8 class. There should be no need
-    # for including it in any .h files though. This rule is just a reminder,
-    # and can be removed once the dust has settled.
-    "-src/v8.h",
-  ],
   "d8\.cc": [
     "+include/libplatform/libplatform.h",
   ],
diff --git a/src/accessors.cc b/src/accessors.cc
index 2094cdb..766509e 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -27,13 +27,15 @@
     AccessorNameSetterCallback setter,
     PropertyAttributes attributes) {
   Factory* factory = isolate->factory();
-  Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
+  Handle<AccessorInfo> info = factory->NewAccessorInfo();
   info->set_property_attributes(attributes);
   info->set_all_can_read(false);
   info->set_all_can_write(false);
   info->set_is_special_data_property(true);
+  name = factory->InternalizeName(name);
   info->set_name(*name);
   Handle<Object> get = v8::FromCData(isolate, getter);
+  if (setter == nullptr) setter = &ReconfigureToDataProperty;
   Handle<Object> set = v8::FromCData(isolate, setter);
   info->set_getter(*get);
   info->set_setter(*set);
@@ -41,21 +43,6 @@
 }
 
 
-Handle<ExecutableAccessorInfo> Accessors::CloneAccessor(
-    Isolate* isolate,
-    Handle<ExecutableAccessorInfo> accessor) {
-  Factory* factory = isolate->factory();
-  Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
-  info->set_name(accessor->name());
-  info->set_flag(accessor->flag());
-  info->set_expected_receiver_type(accessor->expected_receiver_type());
-  info->set_getter(accessor->getter());
-  info->set_setter(accessor->setter());
-  info->set_data(accessor->data());
-  return info;
-}
-
-
 static V8_INLINE bool CheckForName(Handle<Name> name,
                                    Handle<String> property_name,
                                    int offset,
@@ -96,6 +83,7 @@
 bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
                                                  Handle<Name> name,
                                                  int* object_offset) {
+  DCHECK(name->IsUniqueName());
   Isolate* isolate = name->GetIsolate();
 
   switch (map->instance_type()) {
@@ -113,7 +101,7 @@
 
       // Check if the property is overridden on the instance.
       DescriptorArray* descriptors = map->instance_descriptors();
-      int descriptor = descriptors->SearchWithCache(*name, *map);
+      int descriptor = descriptors->SearchWithCache(isolate, *name, *map);
       if (descriptor != DescriptorArray::kNotFound) return false;
 
       Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
@@ -140,6 +128,50 @@
   }
 }
 
+MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+    Isolate* isolate, Handle<JSObject> receiver, Handle<JSObject> holder,
+    Handle<Name> name, Handle<Object> value, bool observe) {
+  LookupIterator it(receiver, name, holder,
+                    LookupIterator::OWN_SKIP_INTERCEPTOR);
+  // Skip any access checks we might hit. This accessor should never be hit
+  // in a situation where the caller does not have access.
+  if (it.state() == LookupIterator::ACCESS_CHECK) {
+    CHECK(it.HasAccess());
+    it.Next();
+  }
+  CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+
+  Handle<Object> old_value;
+  bool is_observed = observe && receiver->map()->is_observed();
+  if (is_observed) {
+    MaybeHandle<Object> maybe_old = Object::GetPropertyWithAccessor(&it);
+    if (!maybe_old.ToHandle(&old_value)) return maybe_old;
+  }
+
+  it.ReconfigureDataProperty(value, it.property_attributes());
+
+  if (is_observed && !old_value->SameValue(*value)) {
+    return JSObject::EnqueueChangeRecord(receiver, "update", name, old_value);
+  }
+
+  return value;
+}
+
+void Accessors::ReconfigureToDataProperty(
+    v8::Local<v8::Name> key, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<void>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Handle<JSObject> receiver =
+      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+  Handle<JSObject> holder =
+      Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Name> name = Utils::OpenHandle(*key);
+  Handle<Object> value = Utils::OpenHandle(*val);
+  MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
+      isolate, receiver, holder, name, value, false);
+  if (result.is_null()) isolate->OptionalRescheduleException(false);
+}
 
 //
 // Accessors::ArgumentsIterator
@@ -156,29 +188,11 @@
 }
 
 
-void Accessors::ArgumentsIteratorSetter(
-    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  HandleScope scope(isolate);
-  Handle<JSObject> object_handle =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
-  Handle<Object> value_handle = Utils::OpenHandle(*val);
-  Handle<Name> name_handle = Utils::OpenHandle(*name);
-
-  if (JSObject::DefinePropertyOrElementIgnoreAttributes(
-          object_handle, name_handle, value_handle, NONE)
-          .is_null()) {
-    isolate->OptionalRescheduleException(false);
-  }
-}
-
-
 Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
     Isolate* isolate, PropertyAttributes attributes) {
   Handle<Name> name = isolate->factory()->iterator_symbol();
-  return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
-                      &ArgumentsIteratorSetter, attributes);
+  return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr,
+                      attributes);
 }
 
 
@@ -219,6 +233,19 @@
   if (JSArray::ObservableSetLength(array, length).is_null()) {
     isolate->OptionalRescheduleException(false);
   }
+
+  if (info.ShouldThrowOnError()) {
+    uint32_t actual_new_len = 0;
+    CHECK(array->length()->ToArrayLength(&actual_new_len));
+    // Throw TypeError if there were non-deletable elements.
+    if (actual_new_len != length) {
+      Factory* factory = isolate->factory();
+      isolate->Throw(*factory->NewTypeError(
+          MessageTemplate::kStrictDeleteProperty,
+          factory->NewNumberFromUint(actual_new_len - 1), array));
+      isolate->OptionalRescheduleException(false);
+    }
+  }
 }
 
 
@@ -259,21 +286,10 @@
 }
 
 
-void Accessors::StringLengthSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::StringLengthInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->length_string(),
-                      &StringLengthGetter,
-                      &StringLengthSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->length_string(),
+                      &StringLengthGetter, nullptr, attributes);
 }
 
 
@@ -295,22 +311,11 @@
 }
 
 
-void Accessors::ScriptColumnOffsetSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("column_offset")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptColumnOffsetGetter,
-                      &ScriptColumnOffsetSetter,
+  return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr,
                       attributes);
 }
 
@@ -332,23 +337,11 @@
 }
 
 
-void Accessors::ScriptIdSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptIdInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(
       isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptIdGetter,
-                      &ScriptIdSetter,
-                      attributes);
+  return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr, attributes);
 }
 
 
@@ -369,21 +362,10 @@
 }
 
 
-void Accessors::ScriptNameSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptNameInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->name_string(),
-                      &ScriptNameGetter,
-                      &ScriptNameSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->name_string(),
+                      &ScriptNameGetter, nullptr, attributes);
 }
 
 
@@ -404,21 +386,10 @@
 }
 
 
-void Accessors::ScriptSourceSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptSourceInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->source_string(),
-                      &ScriptSourceGetter,
-                      &ScriptSourceSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->source_string(),
+                      &ScriptSourceGetter, nullptr, attributes);
 }
 
 
@@ -440,22 +411,11 @@
 }
 
 
-void Accessors::ScriptLineOffsetSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("line_offset")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptLineOffsetGetter,
-                      &ScriptLineOffsetSetter,
+  return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr,
                       attributes);
 }
 
@@ -478,23 +438,11 @@
 }
 
 
-void Accessors::ScriptTypeSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptTypeInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(
       isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptTypeGetter,
-                      &ScriptTypeSetter,
-                      attributes);
+  return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr, attributes);
 }
 
 
@@ -516,22 +464,11 @@
 }
 
 
-void Accessors::ScriptCompilationTypeSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("compilation_type")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptCompilationTypeGetter,
-                      &ScriptCompilationTypeSetter,
+  return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr,
                       attributes);
 }
 
@@ -561,22 +498,11 @@
 }
 
 
-void Accessors::ScriptLineEndsSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("line_ends")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptLineEndsGetter,
-                      &ScriptLineEndsSetter,
+  return MakeAccessor(isolate, name, &ScriptLineEndsGetter, nullptr,
                       attributes);
 }
 
@@ -598,21 +524,10 @@
 }
 
 
-void Accessors::ScriptSourceUrlSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->source_url_string(),
-                      &ScriptSourceUrlGetter,
-                      &ScriptSourceUrlSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->source_url_string(),
+                      &ScriptSourceUrlGetter, nullptr, attributes);
 }
 
 
@@ -634,21 +549,10 @@
 }
 
 
-void Accessors::ScriptSourceMappingUrlSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->source_mapping_url_string(),
-                      &ScriptSourceMappingUrlGetter,
-                      &ScriptSourceMappingUrlSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->source_mapping_url_string(),
+                      &ScriptSourceMappingUrlGetter, nullptr, attributes);
 }
 
 
@@ -671,19 +575,12 @@
 }
 
 
-void Accessors::ScriptIsEmbedderDebugScriptSetter(
-    v8::Local<v8::Name> name, v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
     Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("is_debugger_script")));
   return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
-                      &ScriptIsEmbedderDebugScriptSetter, attributes);
+                      nullptr, attributes);
 }
 
 
@@ -704,22 +601,11 @@
 }
 
 
-void Accessors::ScriptContextDataSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("context_data")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptContextDataGetter,
-                      &ScriptContextDataSetter,
+  return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr,
                       attributes);
 }
 
@@ -751,22 +637,11 @@
 }
 
 
-void Accessors::ScriptEvalFromScriptSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("eval_from_script")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptEvalFromScriptGetter,
-                      &ScriptEvalFromScriptSetter,
+  return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr,
                       attributes);
 }
 
@@ -789,7 +664,6 @@
     Handle<Code> code(SharedFunctionInfo::cast(
         script->eval_from_shared())->code());
     result = Handle<Object>(Smi::FromInt(code->SourcePosition(
-                                code->instruction_start() +
                                 script->eval_from_instructions_offset())),
                             isolate);
   }
@@ -797,23 +671,12 @@
 }
 
 
-void Accessors::ScriptEvalFromScriptPositionSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("eval_from_script_position")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptEvalFromScriptPositionGetter,
-                      &ScriptEvalFromScriptPositionSetter,
-                      attributes);
+  return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
+                      nullptr, attributes);
 }
 
 
@@ -843,22 +706,11 @@
 }
 
 
-void Accessors::ScriptEvalFromFunctionNameSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<void>& info) {
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("eval_from_function_name")));
-  return MakeAccessor(isolate,
-                      name,
-                      &ScriptEvalFromFunctionNameGetter,
-                      &ScriptEvalFromFunctionNameSetter,
+  return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter, nullptr,
                       attributes);
 }
 
@@ -976,59 +828,27 @@
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
-
-MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
-    Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
-    Handle<Object> value, bool is_observed, Handle<Object> old_value) {
-  LookupIterator it(object, name);
-  CHECK_EQ(LookupIterator::ACCESSOR, it.state());
-  DCHECK(it.HolderIsReceiverOrHiddenPrototype());
-  it.ReconfigureDataProperty(value, it.property_details().attributes());
-
-  if (is_observed && !old_value->SameValue(*value)) {
-    return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
-  }
-
-  return value;
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> SetFunctionLength(
-    Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
-  Handle<Object> old_value;
-  bool is_observed = function->map()->is_observed();
-  if (is_observed) {
-    old_value = handle(Smi::FromInt(function->shared()->length()), isolate);
-  }
-
-  return ReplaceAccessorWithDataProperty(isolate, function,
-                                         isolate->factory()->length_string(),
-                                         value, is_observed, old_value);
-}
-
-
-void Accessors::FunctionLengthSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
+void Accessors::ObservedReconfigureToDataProperty(
+    v8::Local<v8::Name> key, v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
+  Handle<JSObject> receiver =
+      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+  Handle<JSObject> holder =
+      Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Name> name = Utils::OpenHandle(*key);
   Handle<Object> value = Utils::OpenHandle(*val);
-
-  Handle<JSFunction> object =
-      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-  if (SetFunctionLength(isolate, object, value).is_null()) {
-    isolate->OptionalRescheduleException(false);
-  }
+  MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
+      isolate, receiver, holder, name, value, true);
+  if (result.is_null()) isolate->OptionalRescheduleException(false);
 }
 
 
 Handle<AccessorInfo> Accessors::FunctionLengthInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->length_string(),
-                      &FunctionLengthGetter,
-                      &FunctionLengthSetter,
+  return MakeAccessor(isolate, isolate->factory()->length_string(),
+                      &FunctionLengthGetter, &ObservedReconfigureToDataProperty,
                       attributes);
 }
 
@@ -1054,43 +874,10 @@
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
-
-MUST_USE_RESULT static MaybeHandle<Object> SetFunctionName(
-    Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
-  Handle<Object> old_value;
-  bool is_observed = function->map()->is_observed();
-  if (is_observed) {
-    old_value = handle(function->shared()->name(), isolate);
-  }
-
-  return ReplaceAccessorWithDataProperty(isolate, function,
-                                         isolate->factory()->name_string(),
-                                         value, is_observed, old_value);
-}
-
-
-void Accessors::FunctionNameSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  HandleScope scope(isolate);
-  Handle<Object> value = Utils::OpenHandle(*val);
-
-  Handle<JSFunction> object =
-      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-  if (SetFunctionName(isolate, object, value).is_null()) {
-    isolate->OptionalRescheduleException(false);
-  }
-}
-
-
 Handle<AccessorInfo> Accessors::FunctionNameInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->name_string(),
-                      &FunctionNameGetter,
-                      &FunctionNameSetter,
+  return MakeAccessor(isolate, isolate->factory()->name_string(),
+                      &FunctionNameGetter, &ObservedReconfigureToDataProperty,
                       attributes);
 }
 
@@ -1158,10 +945,10 @@
 }
 
 
+namespace {
+
 Handle<Object> GetFunctionArguments(Isolate* isolate,
                                     Handle<JSFunction> function) {
-  if (function->shared()->native()) return isolate->factory()->null_value();
-
   // Find the top invocation of the function by traversing frames.
   for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
@@ -1200,9 +987,14 @@
   return isolate->factory()->null_value();
 }
 
+}  // namespace
 
-Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
-  return GetFunctionArguments(function->GetIsolate(), function);
+
+Handle<JSObject> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+  Handle<Object> arguments =
+      GetFunctionArguments(function->GetIsolate(), function);
+  CHECK(arguments->IsJSObject());
+  return Handle<JSObject>::cast(arguments);
 }
 
 
@@ -1213,27 +1005,18 @@
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-  Handle<Object> result = GetFunctionArguments(isolate, function);
+  Handle<Object> result =
+      function->shared()->native()
+          ? Handle<Object>::cast(isolate->factory()->null_value())
+          : GetFunctionArguments(isolate, function);
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
-void Accessors::FunctionArgumentsSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
-  // Function arguments is non writable, non configurable.
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->arguments_string(),
-                      &FunctionArgumentsGetter,
-                      &FunctionArgumentsSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->arguments_string(),
+                      &FunctionArgumentsGetter, nullptr, attributes);
 }
 
 
@@ -1363,22 +1146,10 @@
 }
 
 
-void Accessors::FunctionCallerSetter(
-    v8::Local<v8::Name> name,
-    v8::Local<v8::Value> val,
-    const v8::PropertyCallbackInfo<void>& info) {
-  // Function caller is non writable, non configurable.
-  UNREACHABLE();
-}
-
-
 Handle<AccessorInfo> Accessors::FunctionCallerInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  return MakeAccessor(isolate,
-                      isolate->factory()->caller_string(),
-                      &FunctionCallerGetter,
-                      &FunctionCallerSetter,
-                      attributes);
+  return MakeAccessor(isolate, isolate->factory()->caller_string(),
+                      &FunctionCallerGetter, nullptr, attributes);
 }
 
 
@@ -1386,9 +1157,8 @@
 // Accessors::MakeModuleExport
 //
 
-static void ModuleGetExport(
-    v8::Local<v8::String> property,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+static void ModuleGetExport(v8::Local<v8::Name> property,
+                            const v8::PropertyCallbackInfo<v8::Value>& info) {
   JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
   Context* context = Context::cast(instance->context());
   DCHECK(context->IsModuleContext());
@@ -1397,7 +1167,7 @@
                  ->Int32Value(info.GetIsolate()->GetCurrentContext())
                  .FromMaybe(-1);
   if (slot < 0 || slot >= context->length()) {
-    Handle<String> name = v8::Utils::OpenHandle(*property);
+    Handle<Name> name = v8::Utils::OpenHandle(*property);
 
     Handle<Object> exception = isolate->factory()->NewReferenceError(
         MessageTemplate::kNotDefined, name);
@@ -1406,7 +1176,7 @@
   }
   Object* value = context->get(slot);
   if (value->IsTheHole()) {
-    Handle<String> name = v8::Utils::OpenHandle(*property);
+    Handle<Name> name = v8::Utils::OpenHandle(*property);
 
     Handle<Object> exception = isolate->factory()->NewReferenceError(
         MessageTemplate::kNotDefined, name);
@@ -1417,33 +1187,15 @@
 }
 
 
-static void ModuleSetExport(
-    v8::Local<v8::String> property,
-    v8::Local<v8::Value> value,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
-  JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
-  Context* context = Context::cast(instance->context());
-  DCHECK(context->IsModuleContext());
-  Isolate* isolate = instance->GetIsolate();
-  int slot = info.Data()
-                 ->Int32Value(info.GetIsolate()->GetCurrentContext())
-                 .FromMaybe(-1);
-  if (slot < 0 || slot >= context->length()) {
-    Handle<String> name = v8::Utils::OpenHandle(*property);
-    Handle<Object> exception = isolate->factory()->NewReferenceError(
-        MessageTemplate::kNotDefined, name);
-    isolate->ScheduleThrow(*exception);
-    return;
-  }
-  Object* old_value = context->get(slot);
-  if (old_value->IsTheHole()) {
-    Handle<String> name = v8::Utils::OpenHandle(*property);
-    Handle<Object> exception = isolate->factory()->NewReferenceError(
-        MessageTemplate::kNotDefined, name);
-    isolate->ScheduleThrow(*exception);
-    return;
-  }
-  context->set(slot, *v8::Utils::OpenHandle(*value));
+static void ModuleSetExport(v8::Local<v8::Name> property,
+                            v8::Local<v8::Value> value,
+                            const v8::PropertyCallbackInfo<void>& info) {
+  if (!info.ShouldThrowOnError()) return;
+  Handle<Name> name = v8::Utils::OpenHandle(*property);
+  Isolate* isolate = name->GetIsolate();
+  Handle<Object> exception =
+      isolate->factory()->NewTypeError(MessageTemplate::kNotDefined, name);
+  isolate->ScheduleThrow(*exception);
 }
 
 
@@ -1452,17 +1204,9 @@
     int index,
     PropertyAttributes attributes) {
   Isolate* isolate = name->GetIsolate();
-  Factory* factory = isolate->factory();
-  Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
-  info->set_property_attributes(attributes);
-  info->set_all_can_read(true);
-  info->set_all_can_write(true);
-  info->set_name(*name);
+  Handle<AccessorInfo> info = MakeAccessor(isolate, name, &ModuleGetExport,
+                                           &ModuleSetExport, attributes);
   info->set_data(Smi::FromInt(index));
-  Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
-  Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
-  info->set_getter(*getter);
-  if (!(attributes & ReadOnly)) info->set_setter(*setter);
   return info;
 }
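
The accessors.cc changes above delete a dozen per-field UNREACHABLE() stub setters and instead have MakeAccessor substitute the shared ReconfigureToDataProperty callback whenever the caller passes a null setter. A minimal standalone sketch of that substitution pattern, using invented names (SetterCallback, DefaultSetter) rather than V8's real types:

    #include <cstdio>

    // Hypothetical stand-in for the accessor setter callback type.
    using SetterCallback = void (*)(const char* name);

    // One shared default setter, analogous in spirit to
    // Accessors::ReconfigureToDataProperty: on assignment it turns the
    // accessor into a plain data property instead of crashing in a stub.
    void DefaultSetter(const char* name) {
      std::printf("reconfigure %s to a data property\n", name);
    }

    SetterCallback MakeAccessor(SetterCallback setter) {
      // Single substitution point replacing the many UNREACHABLE() stubs.
      if (setter == nullptr) setter = &DefaultSetter;
      return setter;
    }

    int main() {
      SetterCallback setter = MakeAccessor(nullptr);
      setter("length");  // falls back to the shared default
      return 0;
    }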
 
diff --git a/src/accessors.h b/src/accessors.h
index 6c1765c..3fe550c 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -15,7 +15,7 @@
 namespace internal {
 
 // Forward declarations.
-class ExecutableAccessorInfo;
+class AccessorInfo;
 
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
@@ -44,6 +44,12 @@
   V(ScriptIsEmbedderDebugScript)  \
   V(StringLength)
 
+#define ACCESSOR_SETTER_LIST(V)        \
+  V(ReconfigureToDataProperty)         \
+  V(ObservedReconfigureToDataProperty) \
+  V(ArrayLengthSetter)                 \
+  V(FunctionPrototypeSetter)
+
 // Accessors contains all predefined proxy accessors.
 
 class Accessors : public AllStatic {
@@ -53,16 +59,18 @@
   static void name##Getter(                               \
       v8::Local<v8::Name> name,                           \
       const v8::PropertyCallbackInfo<v8::Value>& info);   \
-  static void name##Setter(                               \
-      v8::Local<v8::Name> name,                           \
-      v8::Local<v8::Value> value,                         \
-      const v8::PropertyCallbackInfo<void>& info);   \
   static Handle<AccessorInfo> name##Info(                 \
       Isolate* isolate,                                   \
       PropertyAttributes attributes);
   ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
 
+#define ACCESSOR_SETTER_DECLARATION(name)                                \
+  static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
+                   const v8::PropertyCallbackInfo<void>& info);
+  ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_SETTER_DECLARATION
+
   enum DescriptorId {
 #define ACCESSOR_INFO_DECLARATION(name) \
     k##name##Getter, \
@@ -75,7 +83,7 @@
   // Accessor functions called directly from the runtime system.
   MUST_USE_RESULT static MaybeHandle<Object> FunctionSetPrototype(
       Handle<JSFunction> object, Handle<Object> value);
-  static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
+  static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
 
   // Accessor infos.
   static Handle<AccessorInfo> MakeModuleExport(
@@ -100,10 +108,6 @@
       AccessorNameGetterCallback getter,
       AccessorNameSetterCallback setter,
       PropertyAttributes attributes);
-
-  static Handle<ExecutableAccessorInfo> CloneAccessor(
-      Isolate* isolate,
-      Handle<ExecutableAccessorInfo> accessor);
 };
 
 }  // namespace internal
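
ACCESSOR_SETTER_LIST above is a second-order (X-)macro: it applies a caller-supplied macro to every setter name, so a single list drives all of the declarations. A self-contained sketch of the technique, with invented names:

    #include <cstdio>

    // The list is a second-order macro: it applies V to each setter name.
    #define SETTER_LIST(V)         \
      V(ReconfigureToDataProperty) \
      V(ArrayLengthSetter)

    // Expand the list into one declaration per entry, then drop the helper.
    #define DECLARE_SETTER(name) void name();
    SETTER_LIST(DECLARE_SETTER)
    #undef DECLARE_SETTER

    void ReconfigureToDataProperty() { std::puts("ReconfigureToDataProperty"); }
    void ArrayLengthSetter() { std::puts("ArrayLengthSetter"); }

    int main() {
      ReconfigureToDataProperty();
      ArrayLengthSetter();
      return 0;
    }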
diff --git a/src/address-map.cc b/src/address-map.cc
index 681661a..86558e0 100644
--- a/src/address-map.cc
+++ b/src/address-map.cc
@@ -17,10 +17,10 @@
   for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
     Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
     Object* root = isolate->heap()->root(root_index);
+    if (!root->IsHeapObject()) continue;
     // Omit root entries that can be written after initialization. They must
     // not be referenced through the root list in the snapshot.
-    if (root->IsHeapObject() &&
-        isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
+    if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
       HeapObject* heap_object = HeapObject::cast(root);
       HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
       if (entry != NULL) {
@@ -29,6 +29,11 @@
       } else {
         SetValue(LookupEntry(map_, heap_object, true), i);
       }
+    } else {
+      // Immortal immovable root objects are constant and allocated on the first
+      // page of old space. Non-constant roots cannot be immortal immovable. The
+      // root index map contains all immortal immovable root objects.
+      CHECK(!Heap::RootIsImmortalImmovable(root_index));
     }
   }
   isolate->set_root_index_map(map_);
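
The address-map.cc hunk restructures the loop so the snapshot invariant is asserted instead of silently assumed: constant heap-object roots are recorded in the index map, and any non-constant root is checked not to be immortal immovable. A toy model of that control flow, with an invented Root struct standing in for V8's heap types:

    #include <cassert>
    #include <unordered_map>
    #include <vector>

    // Toy root: the three properties the loop above cares about.
    struct Root {
      bool is_heap_object;
      bool is_constant;            // like RootCanBeTreatedAsConstant
      bool is_immortal_immovable;  // like RootIsImmortalImmovable
    };

    int main() {
      std::vector<Root> roots = {
          {true, true, true},     // constant, immortal immovable: recorded
          {false, false, false},  // a Smi root: skipped entirely
          {true, false, false},   // writable: must not be immortal immovable
      };
      std::unordered_map<int, int> root_index_map;  // object -> root index
      for (int i = 0; i < static_cast<int>(roots.size()); i++) {
        if (!roots[i].is_heap_object) continue;
        if (roots[i].is_constant) {
          root_index_map.emplace(i, i);  // emplace keeps the first index
        } else {
          // Mirrors CHECK(!Heap::RootIsImmortalImmovable(root_index)).
          assert(!roots[i].is_immortal_immovable);
        }
      }
      return 0;
    }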
diff --git a/src/api-experimental.cc b/src/api-experimental.cc
index 2b49e97..98d62e3 100644
--- a/src/api-experimental.cc
+++ b/src/api-experimental.cc
@@ -122,5 +122,10 @@
   FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
 }
 
+FastAccessorBuilder::ValueId FastAccessorBuilder::Call(
+    v8::FunctionCallback callback, ValueId value_id) {
+  return FromApi(this)->Call(callback, value_id);
+}
+
 }  // namespace experimental
 }  // namespace v8
diff --git a/src/api-natives.cc b/src/api-natives.cc
index bc71e3e..3be2df0 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -16,8 +16,8 @@
 namespace {
 
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
-                                        Handle<ObjectTemplateInfo> data);
-
+                                        Handle<ObjectTemplateInfo> data,
+                                        bool is_hidden_prototype);
 
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
@@ -30,32 +30,36 @@
     return InstantiateFunction(isolate,
                                Handle<FunctionTemplateInfo>::cast(data), name);
   } else if (data->IsObjectTemplateInfo()) {
-    return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data));
+    return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
+                             false);
   } else {
     return data;
   }
 }
 
-
-MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
-                                           Handle<JSObject> object,
-                                           Handle<Name> name,
-                                           Handle<Object> getter,
-                                           Handle<Object> setter,
-                                           PropertyAttributes attributes) {
-  if (!getter->IsUndefined()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, getter,
-        InstantiateFunction(isolate,
-                            Handle<FunctionTemplateInfo>::cast(getter)),
-        Object);
-  }
-  if (!setter->IsUndefined()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, setter,
-        InstantiateFunction(isolate,
-                            Handle<FunctionTemplateInfo>::cast(setter)),
-        Object);
+MaybeHandle<Object> DefineAccessorProperty(
+    Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
+    Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes,
+    bool force_instantiate) {
+  DCHECK(!getter->IsFunctionTemplateInfo() ||
+         !FunctionTemplateInfo::cast(*getter)->do_not_cache());
+  DCHECK(!setter->IsFunctionTemplateInfo() ||
+         !FunctionTemplateInfo::cast(*setter)->do_not_cache());
+  if (force_instantiate) {
+    if (getter->IsFunctionTemplateInfo()) {
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, getter,
+          InstantiateFunction(isolate,
+                              Handle<FunctionTemplateInfo>::cast(getter)),
+          Object);
+    }
+    if (setter->IsFunctionTemplateInfo()) {
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, setter,
+          InstantiateFunction(isolate,
+                              Handle<FunctionTemplateInfo>::cast(setter)),
+          Object);
+    }
   }
   RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
                                                         setter, attributes),
@@ -148,17 +152,78 @@
   return nullptr;
 }
 
+// Returns parent function template or null.
+FunctionTemplateInfo* GetParent(FunctionTemplateInfo* data) {
+  Object* parent = data->parent_template();
+  return parent->IsUndefined() ? nullptr : FunctionTemplateInfo::cast(parent);
+}
 
+// Starting from a given object template's constructor, walk up the
+// inheritance chain until a function template with an instance template is found.
+ObjectTemplateInfo* GetParent(ObjectTemplateInfo* data) {
+  Object* maybe_ctor = data->constructor();
+  if (maybe_ctor->IsUndefined()) return nullptr;
+  FunctionTemplateInfo* ctor = FunctionTemplateInfo::cast(maybe_ctor);
+  while (true) {
+    ctor = GetParent(ctor);
+    if (ctor == nullptr) return nullptr;
+    Object* maybe_obj = ctor->instance_template();
+    if (!maybe_obj->IsUndefined()) return ObjectTemplateInfo::cast(maybe_obj);
+  }
+}
+
+template <typename TemplateInfoT>
 MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
-                                        Handle<TemplateInfo> data) {
+                                        Handle<TemplateInfoT> data,
+                                        bool is_hidden_prototype) {
+  HandleScope scope(isolate);
+  // Disable access checks while instantiating the object.
+  AccessCheckDisableScope access_check_scope(isolate, obj);
+
+  // Walk the inheritance chain and copy all accessors to the current object.
+  int max_number_of_properties = 0;
+  TemplateInfoT* info = *data;
+  while (info != nullptr) {
+    if (!info->property_accessors()->IsUndefined()) {
+      Object* props = info->property_accessors();
+      if (!props->IsUndefined()) {
+        Handle<Object> props_handle(props, isolate);
+        NeanderArray props_array(props_handle);
+        max_number_of_properties += props_array.length();
+      }
+    }
+    info = GetParent(info);
+  }
+
+  if (max_number_of_properties > 0) {
+    int valid_descriptors = 0;
+    // Use a temporary FixedArray to accumulate unique accessors.
+    Handle<FixedArray> array =
+        isolate->factory()->NewFixedArray(max_number_of_properties);
+
+    info = *data;
+    while (info != nullptr) {
+      // Accumulate accessors.
+      if (!info->property_accessors()->IsUndefined()) {
+        Handle<Object> props(info->property_accessors(), isolate);
+        valid_descriptors =
+            AccessorInfo::AppendUnique(props, array, valid_descriptors);
+      }
+      info = GetParent(info);
+    }
+
+    // Install accumulated accessors.
+    for (int i = 0; i < valid_descriptors; i++) {
+      Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+      JSObject::SetAccessor(obj, accessor).Assert();
+    }
+  }
+
   auto property_list = handle(data->property_list(), isolate);
   if (property_list->IsUndefined()) return obj;
   // TODO(dcarney): just use a FixedArray here.
   NeanderArray properties(property_list);
   if (properties.length() == 0) return obj;
-  HandleScope scope(isolate);
-  // Disable access checks while instantiating the object.
-  AccessCheckDisableScope access_check_scope(isolate, obj);
 
   int i = 0;
   for (int c = 0; c < data->number_of_properties(); c++) {
@@ -171,17 +236,16 @@
 
       if (kind == kData) {
         auto prop_data = handle(properties.get(i++), isolate);
-
         RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
                                                         prop_data, attributes),
                             JSObject);
       } else {
         auto getter = handle(properties.get(i++), isolate);
         auto setter = handle(properties.get(i++), isolate);
-        RETURN_ON_EXCEPTION(isolate,
-                            DefineAccessorProperty(isolate, obj, name, getter,
-                                                   setter, attributes),
-                            JSObject);
+        RETURN_ON_EXCEPTION(
+            isolate, DefineAccessorProperty(isolate, obj, name, getter, setter,
+                                            attributes, is_hidden_prototype),
+            JSObject);
       }
     } else {
       // Intrinsic data property --- Get appropriate value from the current
@@ -202,14 +266,28 @@
   return obj;
 }
 
+void CacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number,
+                                Handle<JSObject> object) {
+  auto cache = isolate->template_instantiations_cache();
+  auto new_cache = ObjectHashTable::Put(cache, serial_number, object);
+  isolate->native_context()->set_template_instantiations_cache(*new_cache);
+}
+
+void UncacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number) {
+  auto cache = isolate->template_instantiations_cache();
+  bool was_present = false;
+  auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
+  DCHECK(was_present);
+  isolate->native_context()->set_template_instantiations_cache(*new_cache);
+}
 
 MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
-                                        Handle<ObjectTemplateInfo> data) {
+                                        Handle<ObjectTemplateInfo> info,
+                                        bool is_hidden_prototype) {
   // Enter a new scope.  Recursion could otherwise create a lot of handles.
   HandleScope scope(isolate);
   // Fast path.
   Handle<JSObject> result;
-  auto info = Handle<ObjectTemplateInfo>::cast(data);
   auto constructor = handle(info->constructor(), isolate);
   Handle<JSFunction> cons;
   if (constructor->IsUndefined()) {
@@ -219,39 +297,42 @@
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
   }
+  auto serial_number = handle(Smi::cast(info->serial_number()), isolate);
+  if (serial_number->value()) {
+    // Probe cache.
+    auto cache = isolate->template_instantiations_cache();
+    Object* boilerplate = cache->Lookup(serial_number);
+    if (boilerplate->IsJSObject()) {
+      result = handle(JSObject::cast(boilerplate), isolate);
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+      return scope.CloseAndEscape(result);
+    }
+  }
   auto object = isolate->factory()->NewJSObject(cons);
   ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, result, ConfigureInstance(isolate, object, info), JSFunction);
+      isolate, result,
+      ConfigureInstance(isolate, object, info, is_hidden_prototype),
+      JSFunction);
   // TODO(dcarney): is this necessary?
   JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
+
+  if (serial_number->value()) {
+    CacheTemplateInstantiation(isolate, serial_number, result);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+  }
   return scope.CloseAndEscape(result);
 }
 
 
-void CacheFunction(Isolate* isolate, Handle<Smi> serial_number,
-                   Handle<JSFunction> function) {
-  auto cache = isolate->function_cache();
-  auto new_cache = ObjectHashTable::Put(cache, serial_number, function);
-  isolate->native_context()->set_function_cache(*new_cache);
-}
-
-
-void UncacheFunction(Isolate* isolate, Handle<Smi> serial_number) {
-  auto cache = isolate->function_cache();
-  bool was_present = false;
-  auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
-  DCHECK(was_present);
-  isolate->native_context()->set_function_cache(*new_cache);
-}
-
-
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
                                             Handle<Name> name) {
   auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
-  // Probe cache.
-  if (!data->do_not_cache()) {
-    auto cache = isolate->function_cache();
+  if (serial_number->value()) {
+    // Probe cache.
+    auto cache = isolate->template_instantiations_cache();
     Object* element = cache->Lookup(serial_number);
     if (element->IsJSFunction()) {
       return handle(JSFunction::cast(element), isolate);
@@ -268,7 +349,8 @@
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype,
           InstantiateObject(isolate,
-                            Handle<ObjectTemplateInfo>::cast(prototype_templ)),
+                            Handle<ObjectTemplateInfo>::cast(prototype_templ),
+                            data->hidden_prototype()),
           JSFunction);
     }
     auto parent = handle(data->parent_template(), isolate);
@@ -296,15 +378,16 @@
   if (!name.is_null() && name->IsString()) {
     function->shared()->set_name(*name);
   }
-  if (!data->do_not_cache()) {
+  if (serial_number->value()) {
     // Cache the function.
-    CacheFunction(isolate, serial_number, function);
+    CacheTemplateInstantiation(isolate, serial_number, function);
   }
-  auto result = ConfigureInstance(isolate, function, data);
+  auto result =
+      ConfigureInstance(isolate, function, data, data->hidden_prototype());
   if (result.is_null()) {
     // Uncache on error.
-    if (!data->do_not_cache()) {
-      UncacheFunction(isolate, serial_number);
+    if (serial_number->value()) {
+      UncacheTemplateInstantiation(isolate, serial_number);
     }
     return MaybeHandle<JSFunction>();
   }
@@ -364,23 +447,7 @@
     Handle<ObjectTemplateInfo> data) {
   Isolate* isolate = data->GetIsolate();
   InvokeScope invoke_scope(isolate);
-  return ::v8::internal::InstantiateObject(isolate, data);
-}
-
-
-MaybeHandle<FunctionTemplateInfo> ApiNatives::ConfigureInstance(
-    Isolate* isolate, Handle<FunctionTemplateInfo> desc,
-    Handle<JSObject> instance) {
-  // Configure the instance by adding the properties specified by the
-  // instance template.
-  if (desc->instance_template()->IsUndefined()) return desc;
-  InvokeScope invoke_scope(isolate);
-  Handle<ObjectTemplateInfo> instance_template(
-      ObjectTemplateInfo::cast(desc->instance_template()), isolate);
-  RETURN_ON_EXCEPTION(isolate, ::v8::internal::ConfigureInstance(
-                                   isolate, instance, instance_template),
-                      FunctionTemplateInfo);
-  return desc;
+  return ::v8::internal::InstantiateObject(isolate, data, false);
 }
 
 
@@ -527,11 +594,6 @@
     map->set_is_undetectable();
   }
 
-  // Mark as hidden for the __proto__ accessor if needed.
-  if (obj->hidden_prototype()) {
-    map->set_is_hidden_prototype();
-  }
-
   // Mark as needs_access_check if needed.
   if (obj->needs_access_check()) {
     map->set_is_access_check_needed(true);
@@ -548,73 +610,7 @@
   // Mark instance as callable in the map.
   if (!obj->instance_call_handler()->IsUndefined()) {
     map->set_is_callable();
-    map->set_is_constructor();
-  }
-
-  // Recursively copy parent instance templates' accessors,
-  // 'data' may be modified.
-  int max_number_of_additional_properties = 0;
-  int max_number_of_static_properties = 0;
-  FunctionTemplateInfo* info = *obj;
-  while (true) {
-    if (!info->instance_template()->IsUndefined()) {
-      Object* props = ObjectTemplateInfo::cast(info->instance_template())
-                          ->property_accessors();
-      if (!props->IsUndefined()) {
-        Handle<Object> props_handle(props, isolate);
-        NeanderArray props_array(props_handle);
-        max_number_of_additional_properties += props_array.length();
-      }
-    }
-    if (!info->property_accessors()->IsUndefined()) {
-      Object* props = info->property_accessors();
-      if (!props->IsUndefined()) {
-        Handle<Object> props_handle(props, isolate);
-        NeanderArray props_array(props_handle);
-        max_number_of_static_properties += props_array.length();
-      }
-    }
-    Object* parent = info->parent_template();
-    if (parent->IsUndefined()) break;
-    info = FunctionTemplateInfo::cast(parent);
-  }
-
-  Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
-
-  // Use a temporary FixedArray to acculumate static accessors
-  int valid_descriptors = 0;
-  Handle<FixedArray> array;
-  if (max_number_of_static_properties > 0) {
-    array = isolate->factory()->NewFixedArray(max_number_of_static_properties);
-  }
-
-  while (true) {
-    // Install instance descriptors
-    if (!obj->instance_template()->IsUndefined()) {
-      Handle<ObjectTemplateInfo> instance = Handle<ObjectTemplateInfo>(
-          ObjectTemplateInfo::cast(obj->instance_template()), isolate);
-      Handle<Object> props =
-          Handle<Object>(instance->property_accessors(), isolate);
-      if (!props->IsUndefined()) {
-        Map::AppendCallbackDescriptors(map, props);
-      }
-    }
-    // Accumulate static accessors
-    if (!obj->property_accessors()->IsUndefined()) {
-      Handle<Object> props = Handle<Object>(obj->property_accessors(), isolate);
-      valid_descriptors =
-          AccessorInfo::AppendUnique(props, array, valid_descriptors);
-    }
-    // Climb parent chain
-    Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate);
-    if (parent->IsUndefined()) break;
-    obj = Handle<FunctionTemplateInfo>::cast(parent);
-  }
-
-  // Install accumulated static accessors
-  for (int i = 0; i < valid_descriptors; i++) {
-    Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
-    JSObject::SetAccessor(result, accessor).Assert();
+    map->set_is_constructor(true);
   }
 
   DCHECK(result->shared()->IsApiFunction());
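
The api-natives.cc changes above key the new template_instantiations_cache on a template's serial number, reserving 0 for "do not cache" (the path used for prototype templates). A hedged sketch of the lookup-or-create shape with invented types; note the real code returns a deep copy of the cached boilerplate (JSObject::DeepCopyApiBoilerplate), while this sketch returns the cached object directly:

    #include <map>
    #include <memory>
    #include <string>

    // Invented stand-in for a template boilerplate; not the real V8 type.
    struct Instance {
      std::string source;
    };

    // Serial number 0 means "do not cache", matching the
    // serial_number->value() tests in the patch above.
    static std::map<int, std::shared_ptr<Instance>> cache;

    std::shared_ptr<Instance> Instantiate(int serial_number,
                                          const std::string& source) {
      if (serial_number != 0) {
        auto it = cache.find(serial_number);       // probe cache
        if (it != cache.end()) return it->second;  // boilerplate hit
      }
      auto result = std::make_shared<Instance>(Instance{source});
      if (serial_number != 0) cache.emplace(serial_number, result);
      return result;
    }

    int main() {
      auto a = Instantiate(1, "template A");
      auto b = Instantiate(1, "template A");  // served from the cache
      return (a == b) ? 0 : 1;
    }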
diff --git a/src/api-natives.h b/src/api-natives.h
index fcca4a5..91f0b16 100644
--- a/src/api-natives.h
+++ b/src/api-natives.h
@@ -25,10 +25,6 @@
   MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
       Handle<ObjectTemplateInfo> data);
 
-  MUST_USE_RESULT static MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
-      Isolate* isolate, Handle<FunctionTemplateInfo> instance,
-      Handle<JSObject> data);
-
   enum ApiInstanceType {
     JavaScriptObjectType,
     GlobalObjectType,
diff --git a/src/api.cc b/src/api.cc
index 8274a73..a71dcfa 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -15,6 +15,7 @@
 #include "include/v8-experimental.h"
 #include "include/v8-profiler.h"
 #include "include/v8-testing.h"
+#include "src/accessors.h"
 #include "src/api-experimental.h"
 #include "src/api-natives.h"
 #include "src/assert-scope.h"
@@ -34,11 +35,12 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
+#include "src/gdb-jit.h"
 #include "src/global-handles.h"
 #include "src/icu_util.h"
 #include "src/isolate-inl.h"
+#include "src/json-parser.h"
 #include "src/messages.h"
-#include "src/parsing/json-parser.h"
 #include "src/parsing/parser.h"
 #include "src/parsing/scanner-character-streams.h"
 #include "src/pending-compilation-error-handler.h"
@@ -57,6 +59,7 @@
 #include "src/snapshot/natives.h"
 #include "src/snapshot/snapshot.h"
 #include "src/startup-data-util.h"
+#include "src/tracing/trace-event.h"
 #include "src/unicode-inl.h"
 #include "src/v8.h"
 #include "src/v8threads.h"
@@ -166,6 +169,7 @@
     isolate_->IncrementJsCallsFromApiCounter();
     isolate_->handle_scope_implementer()->IncrementCallDepth();
     if (!context_.IsEmpty()) context_->Enter();
+    if (do_callback_) isolate_->FireBeforeCallEnteredCallback();
   }
   ~CallDepthScope() {
     if (!context_.IsEmpty()) context_->Exit();
@@ -968,6 +972,9 @@
   info->set_flag(0);
 }
 
+static Local<ObjectTemplate> ObjectTemplateNew(
+    i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
+    bool do_not_cache);
 
 Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
   i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -975,8 +982,9 @@
   i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
                               i_isolate);
   if (result->IsUndefined()) {
-    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
-    result = Utils::OpenHandle(*ObjectTemplate::New(isolate));
+    // Do not cache prototype objects.
+    result = Utils::OpenHandle(
+        *ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
     Utils::OpenHandle(this)->set_prototype_template(*result);
   }
   return ToApiHandle<ObjectTemplate>(result);
@@ -1118,21 +1126,23 @@
   return obj;
 }
 
-
 template <typename Getter, typename Setter>
 static i::Handle<i::AccessorInfo> MakeAccessorInfo(
     v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
     v8::AccessControl settings, v8::PropertyAttribute attributes,
-    v8::Local<AccessorSignature> signature) {
+    v8::Local<AccessorSignature> signature, bool is_special_data_property) {
   i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
-  i::Handle<i::ExecutableAccessorInfo> obj =
-      isolate->factory()->NewExecutableAccessorInfo();
+  i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
   SET_FIELD_WRAPPED(obj, set_getter, getter);
+  if (is_special_data_property && setter == nullptr) {
+    setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
+  }
   SET_FIELD_WRAPPED(obj, set_setter, setter);
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   }
   obj->set_data(*Utils::OpenHandle(*data));
+  obj->set_is_special_data_property(is_special_data_property);
   return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
 }
 
@@ -1224,9 +1234,9 @@
   return New(i::Isolate::Current(), Local<FunctionTemplate>());
 }
 
-
-Local<ObjectTemplate> ObjectTemplate::New(
-    i::Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
+static Local<ObjectTemplate> ObjectTemplateNew(
+    i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
+    bool do_not_cache) {
   // Changes to the environment cannot be captured in the snapshot. Expect no
   // object templates when the isolate is created for serialization.
   DCHECK(!isolate->serializer_enabled());
@@ -1237,12 +1247,22 @@
   i::Handle<i::ObjectTemplateInfo> obj =
       i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
   InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+  int next_serial_number = 0;
+  if (!do_not_cache) {
+    next_serial_number = isolate->next_serial_number() + 1;
+    isolate->set_next_serial_number(next_serial_number);
+  }
+  obj->set_serial_number(i::Smi::FromInt(next_serial_number));
   if (!constructor.IsEmpty())
     obj->set_constructor(*Utils::OpenHandle(*constructor));
   obj->set_internal_field_count(i::Smi::FromInt(0));
   return Utils::ToLocal(obj);
 }
 
+Local<ObjectTemplate> ObjectTemplate::New(
+    i::Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
+  return ObjectTemplateNew(isolate, constructor, false);
+}
 
 // Ensure that the object template has a constructor.  If no
 // constructor is available we create one.
@@ -1263,39 +1283,20 @@
 }
 
 
-static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
-    i::Isolate* isolate,
-    Template* template_obj) {
-  return Utils::OpenHandle(template_obj);
-}
-
-
-// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
-static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
-    i::Isolate* isolate,
-    ObjectTemplate* object_template) {
-  EnsureConstructor(isolate, object_template);
-  return Utils::OpenHandle(object_template);
-}
-
-
-template<typename Getter, typename Setter, typename Data, typename Template>
-static bool TemplateSetAccessor(
-    Template* template_obj,
-    v8::Local<Name> name,
-    Getter getter,
-    Setter setter,
-    Data data,
-    AccessControl settings,
-    PropertyAttribute attribute,
-    v8::Local<AccessorSignature> signature) {
-  auto isolate = Utils::OpenHandle(template_obj)->GetIsolate();
+template <typename Getter, typename Setter, typename Data, typename Template>
+static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
+                                Getter getter, Setter setter, Data data,
+                                AccessControl settings,
+                                PropertyAttribute attribute,
+                                v8::Local<AccessorSignature> signature,
+                                bool is_special_data_property) {
+  auto info = Utils::OpenHandle(template_obj);
+  auto isolate = info->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   auto obj = MakeAccessorInfo(name, getter, setter, data, settings, attribute,
-                              signature);
+                              signature, is_special_data_property);
   if (obj.is_null()) return false;
-  auto info = GetTemplateInfo(isolate, template_obj);
   i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
   return true;
 }
@@ -1308,8 +1309,8 @@
                                      PropertyAttribute attribute,
                                      v8::Local<AccessorSignature> signature,
                                      AccessControl settings) {
-  TemplateSetAccessor(
-      this, name, getter, setter, data, settings, attribute, signature);
+  TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+                      signature, true);
 }
 
 
@@ -1320,8 +1321,8 @@
                                      PropertyAttribute attribute,
                                      v8::Local<AccessorSignature> signature,
                                      AccessControl settings) {
-  TemplateSetAccessor(
-      this, name, getter, setter, data, settings, attribute, signature);
+  TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+                      signature, true);
 }
 
 
@@ -1343,8 +1344,8 @@
                                  v8::Local<Value> data, AccessControl settings,
                                  PropertyAttribute attribute,
                                  v8::Local<AccessorSignature> signature) {
-  TemplateSetAccessor(
-      this, name, getter, setter, data, settings, attribute, signature);
+  TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+                      signature, i::FLAG_disable_old_api_accessors);
 }
 
 
@@ -1354,8 +1355,8 @@
                                  v8::Local<Value> data, AccessControl settings,
                                  PropertyAttribute attribute,
                                  v8::Local<AccessorSignature> signature) {
-  TemplateSetAccessor(
-      this, name, getter, setter, data, settings, attribute, signature);
+  TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+                      signature, i::FLAG_disable_old_api_accessors);
 }
 
 
@@ -1451,6 +1452,10 @@
   cons->set_needs_access_check(true);
 }
 
+void ObjectTemplate::SetAccessCheckCallback(
+    DeprecatedAccessCheckCallback callback, Local<Value> data) {
+  SetAccessCheckCallback(reinterpret_cast<AccessCheckCallback>(callback), data);
+}
 
 void ObjectTemplate::SetAccessCheckCallbacks(
     NamedSecurityCallback named_callback,
@@ -1601,32 +1606,7 @@
       function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
   i::Isolate* isolate = obj->GetIsolate();
 
-  i::ScopeInfo* scope_info = function_info->scope_info();
   i::Handle<i::JSReceiver> global(isolate->native_context()->global_object());
-  for (int i = 0; i < scope_info->StrongModeFreeVariableCount(); ++i) {
-    i::Handle<i::String> name_string(scope_info->StrongModeFreeVariableName(i));
-    i::ScriptContextTable::LookupResult result;
-    i::Handle<i::ScriptContextTable> script_context_table(
-        isolate->native_context()->script_context_table());
-    if (!i::ScriptContextTable::Lookup(script_context_table, name_string,
-                                       &result)) {
-      i::Handle<i::Name> name(scope_info->StrongModeFreeVariableName(i));
-      Maybe<bool> has = i::JSReceiver::HasProperty(global, name);
-      if (has.IsJust() && !has.FromJust()) {
-        i::PendingCompilationErrorHandler pending_error_handler_;
-        pending_error_handler_.ReportMessageAt(
-            scope_info->StrongModeFreeVariableStartPosition(i),
-            scope_info->StrongModeFreeVariableEndPosition(i),
-            i::MessageTemplate::kStrongUnboundGlobal, name_string,
-            i::kReferenceError);
-        i::Handle<i::Script> script(i::Script::cast(function_info->script()));
-        pending_error_handler_.ThrowPendingError(isolate, script);
-        isolate->ReportPendingMessages();
-        isolate->OptionalRescheduleException(true);
-        return Local<Script>();
-      }
-    }
-  }
   i::Handle<i::JSFunction> function =
       obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
           function_info, isolate->native_context());
@@ -1707,6 +1687,7 @@
   PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Script::Run()", Value)
   i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+  TRACE_EVENT0("v8", "V8.Execute");
   auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
   i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
   Local<Value> result;
@@ -1760,6 +1741,7 @@
   i::Handle<i::SharedFunctionInfo> result;
   {
     i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
+    TRACE_EVENT0("v8", "V8.CompileScript");
     i::Handle<i::Object> name_obj;
     i::Handle<i::Object> source_map_url;
     int line_offset = 0;
@@ -2929,11 +2911,11 @@
 
 MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
-  if (obj->IsJSObject()) return ToApiHandle<Object>(obj);
+  if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
   PREPARE_FOR_EXECUTION(context, "ToObject", Object);
   Local<Object> result;
   has_pending_exception =
-      !ToLocal<Object>(i::Execution::ToObject(isolate, obj), &result);
+      !ToLocal<Object>(i::Object::ToObject(isolate, obj), &result);
   RETURN_ON_FAILED_EXECUTION(Object);
   RETURN_ESCAPED(result);
 }
@@ -3307,16 +3289,14 @@
 
 Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
   auto obj = Utils::OpenHandle(this);
-  i::Handle<i::Object> num;
   if (obj->IsNumber()) {
-    num = obj;
-  } else {
-    PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
-    has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
-    RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
+    return Just(NumberToInt64(*obj));
   }
-  return Just(num->IsSmi() ? static_cast<int64_t>(i::Smi::cast(*num)->value())
-                           : static_cast<int64_t>(num->Number()));
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
+  i::Handle<i::Object> num;
+  has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
+  return Just(NumberToInt64(*num));
 }
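[editor's note] Embedder-visible behaviour is unchanged by the refactor above; a minimal usage sketch of the Maybe-based API this fast path serves (helper name hypothetical):

  int64_t ToInt64OrZero(v8::Local<v8::Context> context,
                        v8::Local<v8::Value> value) {
    // Numbers take the allocation-free fast path above; other values may
    // run ToInteger and can throw, yielding Nothing.
    return value->IntegerValue(context).FromMaybe(0);
  }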
 
 
@@ -3558,7 +3538,8 @@
       isolate, js_object, key, &success, i::LookupIterator::OWN);
   if (!success) return i::MaybeHandle<i::Object>();
 
-  return i::JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs);
+  return i::JSObject::DefineOwnPropertyIgnoreAttributes(
+      &it, value, attrs, i::JSObject::FORCE_FIELD);
 }
 
 
@@ -3599,8 +3580,27 @@
 
 Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
                                    Local<Value> value) {
-  return DefineOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)),
-                           value, DontEnum);
+  PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrivate()", bool);
+  auto self = Utils::OpenHandle(this);
+  auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
+  auto value_obj = Utils::OpenHandle(*value);
+  if (self->IsJSProxy()) {
+    i::PropertyDescriptor desc;
+    desc.set_writable(true);
+    desc.set_enumerable(false);
+    desc.set_configurable(true);
+    desc.set_value(value_obj);
+    return i::JSProxy::SetPrivateProperty(
+        isolate, i::Handle<i::JSProxy>::cast(self),
+        i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
+  }
+  auto js_object = i::Handle<i::JSObject>::cast(self);
+  i::LookupIterator it(js_object, key_obj);
+  has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
+                              &it, value_obj, i::DONT_ENUM)
+                              .is_null();
+  RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+  return Just(true);
 }
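[editor's note] A short usage sketch, assuming an entered context; with the proxy-aware path above, SetPrivate now also works when the receiver is a JSProxy (helper name and key string are hypothetical):

  bool TagObject(v8::Local<v8::Context> context, v8::Local<v8::Object> obj) {
    v8::Isolate* isolate = context->GetIsolate();
    v8::Local<v8::Private> key = v8::Private::ForApi(
        isolate, v8::String::NewFromUtf8(isolate, "embedder:tag"));
    return obj->SetPrivate(context, key, v8::True(isolate)).FromMaybe(false);
  }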
 
 
@@ -3752,8 +3752,7 @@
   auto self = Utils::OpenHandle(this);
   i::Handle<i::FixedArray> value;
   has_pending_exception =
-      !i::JSReceiver::GetKeys(self, i::JSReceiver::INCLUDE_PROTOS,
-                              i::ENUMERABLE_STRINGS)
+      !i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
            .ToHandle(&value);
   RETURN_ON_FAILED_EXECUTION(Array);
   // Because we use caching to speed up enumeration it is important
@@ -3775,9 +3774,9 @@
   PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
   auto self = Utils::OpenHandle(this);
   i::Handle<i::FixedArray> value;
-  has_pending_exception = !i::JSReceiver::GetKeys(self, i::JSReceiver::OWN_ONLY,
-                                                  i::ENUMERABLE_STRINGS)
-                               .ToHandle(&value);
+  has_pending_exception =
+      !i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
+           .ToHandle(&value);
   RETURN_ON_FAILED_EXECUTION(Array);
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
@@ -3920,7 +3919,7 @@
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
   v8::Local<AccessorSignature> signature;
   auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
-                               signature);
+                               signature, i::FLAG_disable_old_api_accessors);
   if (info.is_null()) return Nothing<bool>();
   bool fast = obj->HasFastProperties();
   i::Handle<i::Object> result;
@@ -4281,6 +4280,7 @@
   PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
                                       Value);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+  TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   auto recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4307,6 +4307,7 @@
   PREPARE_FOR_EXECUTION_WITH_CALLBACK(context,
                                       "v8::Object::CallAsConstructor()", Value);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+  TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
   i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4356,6 +4357,7 @@
   PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
                                       Object);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+  TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
   i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4379,6 +4381,7 @@
                                      v8::Local<v8::Value> argv[]) {
   PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
   i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+  TRACE_EVENT0("v8", "V8.Execute");
   auto self = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -7158,10 +7161,16 @@
   if (params.entry_hook) {
     isolate->set_function_entry_hook(params.entry_hook);
   }
-  if (params.code_event_handler) {
+  auto code_event_handler = params.code_event_handler;
+#ifdef ENABLE_GDB_JIT_INTERFACE
+  if (code_event_handler == nullptr && i::FLAG_gdbjit) {
+    code_event_handler = i::GDBJITInterface::EventHandler;
+  }
+#endif  // ENABLE_GDB_JIT_INTERFACE
+  if (code_event_handler) {
     isolate->InitializeLoggingAndCounters();
     isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
-                                           params.code_event_handler);
+                                           code_event_handler);
   }
   if (params.counter_lookup_callback) {
     v8_isolate->SetCounterFunction(params.counter_lookup_callback);
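[editor's note] The effect on embedders, sketched with a hypothetical handler: an explicit code_event_handler still wins, and only a null handler combined with --gdbjit falls back to the GDB JIT interface.

  void MyJitListener(const v8::JitCodeEvent* event) { /* record event */ }

  v8::Isolate* NewIsolateWithJitEvents() {
    v8::Isolate::CreateParams params;
    params.code_event_handler = MyJitListener;  // nullptr + --gdbjit => GDBJIT
    return v8::Isolate::New(params);
  }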
@@ -7371,6 +7380,20 @@
 }
 
 
+void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
+  if (callback == NULL) return;
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->AddBeforeCallEnteredCallback(callback);
+}
+
+
+void Isolate::RemoveBeforeCallEnteredCallback(
+    BeforeCallEnteredCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->RemoveBeforeCallEnteredCallback(callback);
+}
+
+
 void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
   if (callback == NULL) return;
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
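[editor's note] Usage sketch for the new hook pair added above (callback body hypothetical): it mirrors the existing CallCompletedCallback, firing just before V8 enters a script call.

  void OnBeforeCall(v8::Isolate* isolate) {
    // e.g. start a per-call timer or check re-entrancy invariants here
  }

  void InstallHooks(v8::Isolate* isolate) {
    isolate->AddBeforeCallEnteredCallback(OnBeforeCall);
    // later: isolate->RemoveBeforeCallEnteredCallback(OnBeforeCall);
  }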
@@ -7384,6 +7407,19 @@
 }
 
 
+void Isolate::AddCallCompletedCallback(
+    DeprecatedCallCompletedCallback callback) {
+  AddCallCompletedCallback(reinterpret_cast<CallCompletedCallback>(callback));
+}
+
+
+void Isolate::RemoveCallCompletedCallback(
+    DeprecatedCallCompletedCallback callback) {
+  RemoveCallCompletedCallback(
+      reinterpret_cast<CallCompletedCallback>(callback));
+}
+
+
 void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
   if (callback == NULL) return;
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7476,6 +7512,7 @@
   {
     i::HistogramTimerScope idle_notification_scope(
         isolate->counters()->gc_low_memory_notification());
+    TRACE_EVENT0("v8", "V8.GCLowMemoryNotification");
     isolate->heap()->CollectAllAvailableGarbage("low memory notification");
   }
 }
@@ -8050,6 +8087,9 @@
       base::TimeDelta::FromMicroseconds(us));
 }
 
+void CpuProfiler::CollectSample() {
+  reinterpret_cast<i::CpuProfiler*>(this)->CollectSample();
+}
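[editor's note] A minimal sketch of when an embedder might call this, assuming a profile has already been started via StartProfiling:

  void MarkInterestingPoint(v8::Isolate* isolate) {
    // Injects an extra sample at a point of interest instead of waiting
    // for the next sampler tick.
    isolate->GetCpuProfiler()->CollectSample();
  }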
 
 void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
   reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
@@ -8278,6 +8318,23 @@
 }
 
 
+bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
+                                             int stack_depth) {
+  return reinterpret_cast<i::HeapProfiler*>(this)
+      ->StartSamplingHeapProfiler(sample_interval, stack_depth);
+}
+
+
+void HeapProfiler::StopSamplingHeapProfiler() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StopSamplingHeapProfiler();
+}
+
+
+AllocationProfile* HeapProfiler::GetAllocationProfile() {
+  return reinterpret_cast<i::HeapProfiler*>(this)->GetAllocationProfile();
+}
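[editor's note] End-to-end usage sketch for the new sampling heap profiler surface added above; the interval and stack depth are arbitrary example values.

  void SampleHeap(v8::Isolate* isolate) {
    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    profiler->StartSamplingHeapProfiler(64 * 1024 /* bytes */, 16 /* frames */);
    // ... run the workload to be profiled ...
    v8::AllocationProfile* profile = profiler->GetAllocationProfile();
    if (profile != nullptr) {
      // Walk profile->GetRootNode() here; the caller owns the profile.
      delete profile;
    }
    profiler->StopSamplingHeapProfiler();
  }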
+
+
 void HeapProfiler::DeleteAllHeapSnapshots() {
   reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
 }
diff --git a/src/arguments.cc b/src/arguments.cc
index a783357..077991b 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -70,16 +70,14 @@
   }
 
 
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2)                   \
-void PropertyCallbackArguments::Call(Function f,                               \
-                                     Arg1 arg1,                                \
-                                     Arg2 arg2) {                              \
-  Isolate* isolate = this->isolate();                                          \
-  VMState<EXTERNAL> state(isolate);                                            \
-  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));                 \
-  PropertyCallbackInfo<ReturnValue> info(begin());                             \
-  f(arg1, arg2, info);                                                         \
-}
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2)               \
+  void PropertyCallbackArguments::Call(Function f, Arg1 arg1, Arg2 arg2) { \
+    Isolate* isolate = this->isolate();                                    \
+    VMState<EXTERNAL> state(isolate);                                      \
+    ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
+    PropertyCallbackInfo<ReturnValue> info(begin());                       \
+    f(arg1, arg2, info);                                                   \
+  }
 
 
 FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
diff --git a/src/arguments.h b/src/arguments.h
index d11a8cd..3509677 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -152,17 +152,19 @@
   static const int kReturnValueDefaultValueIndex =
       T::kReturnValueDefaultValueIndex;
   static const int kIsolateIndex = T::kIsolateIndex;
+  static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
 
-  PropertyCallbackArguments(Isolate* isolate,
-                            Object* data,
-                            Object* self,
-                            JSObject* holder)
+  PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
+                            JSObject* holder, Object::ShouldThrow should_throw)
       : Super(isolate) {
     Object** values = this->begin();
     values[T::kThisIndex] = self;
     values[T::kHolderIndex] = holder;
     values[T::kDataIndex] = data;
     values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+    values[T::kShouldThrowOnErrorIndex] =
+        Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
+
     // Here the hole is set as default value.
     // It cannot escape into js as it's remove in Call below.
     values[T::kReturnValueDefaultValueIndex] =
@@ -218,17 +220,14 @@
   static const int kCalleeIndex = T::kCalleeIndex;
   static const int kContextSaveIndex = T::kContextSaveIndex;
 
-  FunctionCallbackArguments(internal::Isolate* isolate,
-      internal::Object* data,
-      internal::JSFunction* callee,
-      internal::Object* holder,
-      internal::Object** argv,
-      int argc,
-      bool is_construct_call)
-        : Super(isolate),
-          argv_(argv),
-          argc_(argc),
-          is_construct_call_(is_construct_call) {
+  FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
+                            internal::HeapObject* callee,
+                            internal::Object* holder, internal::Object** argv,
+                            int argc, bool is_construct_call)
+      : Super(isolate),
+        argv_(argv),
+        argc_(argc),
+        is_construct_call_(is_construct_call) {
     Object** values = begin();
     values[T::kDataIndex] = data;
     values[T::kCalleeIndex] = callee;
@@ -240,7 +239,8 @@
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    DCHECK(values[T::kCalleeIndex]->IsJSFunction());
+    DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
+           values[T::kCalleeIndex]->IsFunctionTemplateInfo());
     DCHECK(values[T::kHolderIndex]->IsHeapObject());
     DCHECK(values[T::kIsolateIndex]->IsSmi());
   }
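[editor's note] The new kShouldThrowOnErrorIndex slot is what backs PropertyCallbackInfo::ShouldThrowOnError() on the API side; a hedged interceptor sketch:

  void ReadOnlySetter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
    if (info.ShouldThrowOnError()) {  // e.g. strict-mode receiver
      info.GetIsolate()->ThrowException(v8::Exception::TypeError(
          v8::String::NewFromUtf8(info.GetIsolate(), "read-only property")));
    }
    // Otherwise the failed write is silently ignored.
  }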
@@ -271,20 +271,23 @@
 #define CLOBBER_DOUBLE_REGISTERS()
 #endif
 
-
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                        \
-static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate));  \
-Type Name(int args_length, Object** args_object, Isolate* isolate) {     \
-  CLOBBER_DOUBLE_REGISTERS();                                            \
-  Arguments args(args_length, args_object);                              \
-  return __RT_impl_##Name(args, isolate);                                \
-}                                                                        \
-static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
-
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                         \
+  static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+  Type Name(int args_length, Object** args_object, Isolate* isolate) {    \
+    CLOBBER_DOUBLE_REGISTERS();                                           \
+    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();  \
+    RuntimeCallTimerScope timer(isolate, &stats->Name);                   \
+    Arguments args(args_length, args_object);                             \
+    Type value = __RT_impl_##Name(args, isolate);                         \
+    return value;                                                         \
+  }                                                                       \
+  static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
 
 #define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
 #define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
     RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+#define RUNTIME_FUNCTION_RETURN_TRIPLE(Name) \
+    RUNTIME_FUNCTION_RETURNS_TYPE(ObjectTriple, Name)
 
 }  // namespace internal
 }  // namespace v8
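[editor's note] With the instrumented macro above, every runtime entry is timed via RuntimeCallStats; a hypothetical definition showing the usage pattern the macro expands for:

  // Hypothetical runtime function: the macro now wraps the body in a
  // RuntimeCallTimerScope keyed on stats->Runtime_Example.
  RUNTIME_FUNCTION(Runtime_Example) {
    HandleScope scope(isolate);
    DCHECK_EQ(0, args.length());
    return isolate->heap()->undefined_value();
  }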
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index c9602ea..0de9642 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -138,8 +138,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -197,10 +197,8 @@
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -263,23 +261,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  Instr current_instr = Assembler::instr_at(pc_);
-  Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
-  // A patched return sequence is:
-  //  ldr ip, [pc, #0]
-  //  blx ip
-  return Assembler::IsLdrPcImmediateOffset(current_instr) &&
-         Assembler::IsBlxReg(next_instr);
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  Instr current_instr = Assembler::instr_at(pc_);
-  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d2e3231..b0fa462 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -82,7 +82,7 @@
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
   supported_ |= CpuFeaturesImpliedByCompiler();
-  cache_line_size_ = 64;
+  dcache_line_size_ = 64;
 
   // Only use statically determined features for cross compile (snapshot).
   if (cross_compile) return;
@@ -137,7 +137,7 @@
   if (cpu.implementer() == base::CPU::ARM &&
       (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
        cpu.part() == base::CPU::ARM_CORTEX_A9)) {
-    cache_line_size_ = 32;
+    dcache_line_size_ = 32;
   }
 
   if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
@@ -1947,6 +1947,16 @@
 }
 
 
+void Assembler::rbit(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.144.
+  // cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
+  DCHECK(IsEnabled(ARMv7));
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
+}
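[editor's note] rbit is presumably added to support ctz-style bit scans; a minimal sketch of the standard rbit+clz idiom it enables (helper name hypothetical):

  // Count trailing zeros on ARMv7: reverse the bits, then count leading
  // zeros of the reversed value.
  void EmitCountTrailingZeros(Assembler* assm, Register dst, Register src) {
    assm->rbit(dst, src);
    assm->clz(dst, dst);
  }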
+
+
 // Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   DCHECK(!dst.is(pc));
@@ -2135,6 +2145,21 @@
 }
 
 
+void Assembler::dmb(BarrierOption option) {
+  emit(kSpecialCondition | 0x57ff*B12 | 5*B4 | option);
+}
+
+
+void Assembler::dsb(BarrierOption option) {
+  emit(kSpecialCondition | 0x57ff*B12 | 4*B4 | option);
+}
+
+
+void Assembler::isb(BarrierOption option) {
+  emit(kSpecialCondition | 0x57ff*B12 | 6*B4 | option);
+}
+
+
 // Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
@@ -2923,6 +2948,24 @@
 }
 
 
+void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
+                             VFPConversionMode mode, const Condition cond) {
+  emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
+}
+
+
+void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
+                             VFPConversionMode mode, const Condition cond) {
+  emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
+}
+
+
+void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
+                             VFPConversionMode mode, const Condition cond) {
+  emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
+}
+
+
 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                              const DwVfpRegister src,
                              VFPConversionMode mode,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 1abf1ab..d381653 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -285,6 +285,7 @@
 
 typedef QwNeonRegister QuadRegister;
 
+typedef QwNeonRegister Simd128Register;
 
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
@@ -950,6 +951,9 @@
   void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
              Condition cond = al);
 
+  // Reverse the bits in a register.
+  void rbit(Register dst, Register src, Condition cond = al);
+
   // Status register access instructions
 
   void mrs(Register dst, SRegister s, Condition cond = al);
@@ -986,6 +990,11 @@
   void bkpt(uint32_t imm16);  // v5 and above
   void svc(uint32_t imm24, Condition cond = al);
 
+  // Synchronization instructions
+  void dmb(BarrierOption option);
+  void dsb(BarrierOption option);
+  void isb(BarrierOption option);
+
   // Coprocessor instructions
 
   void cdp(Coprocessor coproc, int opcode_1,
@@ -1125,6 +1134,18 @@
                     const SwVfpRegister src,
                     VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
+  void vcvt_f32_u32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    VFPConversionMode mode = kDefaultRoundToZero,
+                    const Condition cond = al);
+  void vcvt_s32_f32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    VFPConversionMode mode = kDefaultRoundToZero,
+                    const Condition cond = al);
+  void vcvt_u32_f32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    VFPConversionMode mode = kDefaultRoundToZero,
+                    const Condition cond = al);
   void vcvt_s32_f64(const SwVfpRegister dst,
                     const DwVfpRegister src,
                     VFPConversionMode mode = kDefaultRoundToZero,
@@ -1336,7 +1357,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   // Record the emission of a constant pool.
   //
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 0c83f91..a6bfdb1 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -137,6 +137,108 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r0                 : number of arguments
+  //  -- lr                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
+  Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r1 and the double value in d1.
+  __ LoadRoot(r1, root_index);
+  __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+
+  // Remember how many slots to drop (including the receiver).
+  __ add(r4, r0, Operand(1));
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters are done.
+    __ sub(r0, r0, Operand(1), SetCC);
+    __ b(lt, &done_loop);
+
+    // Load the next parameter tagged value into r2.
+    __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+    // Load the double value of the parameter into d2, converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r2, &convert_smi);
+    __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r0);
+      __ SmiTag(r4);
+      __ Push(r0, r1, r4);
+      __ mov(r0, r2);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(r2, r0);
+      __ Pop(r0, r1, r4);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r1);
+        __ JumpIfSmi(r1, &done_restore);
+        __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+      __ SmiUntag(r4);
+      __ SmiUntag(r0);
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ vldr(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r2);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (d1) and the next parameter value on the right hand side (d2).
+    Label compare_nan, compare_swap;
+    __ VFPCompareAndSetFlags(d1, d2);
+    __ b(cc_done, &loop);
+    __ b(cc_swap, &compare_swap);
+    __ b(vs, &compare_nan);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ VmovHigh(ip, reg);
+    __ cmp(ip, Operand(0x80000000));
+    __ b(ne, &loop);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ vmov(d1, d2);
+    __ mov(r1, r2);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(r1, Heap::kNanValueRootIndex);
+    __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ mov(r0, r1);
+  __ Drop(r4);
+  __ Ret();
+}
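[editor's note] For readability, a hedged C++ model of what the generated Math.max loop above computes (Math.min mirrors it with the comparison flipped); this restates the assembly, it is not part of the patch:

  #include <cmath>
  #include <limits>
  #include <vector>

  double MathMaxModel(const std::vector<double>& args) {
    double acc = -std::numeric_limits<double>::infinity();
    for (double x : args) {
      if (std::isnan(acc) || std::isnan(x)) {
        acc = std::numeric_limits<double>::quiet_NaN();  // NaN poisons result
      } else if (acc < x || (acc == x && std::signbit(acc))) {
        acc = x;  // swap: x is larger, or acc is -0 and x is +0
      }
    }
    return acc;
  }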
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0                     : number of arguments
@@ -227,8 +329,9 @@
   __ bind(&new_object);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r2, r1, r3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(r2);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(r2);
   }
   __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -352,8 +455,9 @@
   __ bind(&new_object);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r2, r1, r3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(r2);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(r2);
   }
   __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -361,27 +465,6 @@
 }
 
 
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- r1 : target function (preserved for callee)
-  //  -- r3 : new target (preserved for callee)
-  // -----------------------------------
-
-  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  __ push(r1);
-  __ push(r3);
-  // Push function as parameter to the runtime call.
-  __ Push(r1);
-
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ pop(r3);
-  __ pop(r1);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
@@ -389,10 +472,35 @@
   __ Jump(r2);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r0 : argument count (preserved for callee)
+  //  -- r1 : target function (preserved for callee)
+  //  -- r3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(r0);
+    __ push(r0);
+    // Push a copy of the target function and the new target.
+    __ push(r1);
+    __ push(r3);
+    // Push function as parameter to the runtime call.
+    __ Push(r1);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r0);
+    __ CallRuntime(function_id, 1);
+    __ mov(r2, r0);
+
+    // Restore target function and new target.
+    __ pop(r3);
+    __ pop(r1);
+    __ pop(r0);
+    __ SmiUntag(r0, r0);
+  }
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
 }
 
 
@@ -407,8 +515,7 @@
   __ cmp(sp, Operand(ip));
   __ b(hs, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -417,7 +524,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
@@ -435,155 +543,22 @@
 
     // Preserve the incoming parameters on the stack.
     __ AssertUndefinedOrAllocationSite(r2, r4);
-    __ push(r2);
     __ SmiTag(r0);
-    __ push(r0);
+    __ Push(r2, r0);
 
     if (create_implicit_receiver) {
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
-        __ b(ne, &rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        // r3: new target
-        __ ldr(r2,
-               FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
-        __ JumpIfSmi(r2, &rt_call);
-        __ CompareObjectType(r2, r5, r4, MAP_TYPE);
-        __ b(ne, &rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
-        __ cmp(r1, r5);
-        __ b(ne, &rt_call);
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // r1: constructor function
-        // r2: initial map
-        // r3: new target
-        __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
-        __ b(eq, &rt_call);
-
-        // Now allocate the JSObject on the heap.
-        // r1: constructor function
-        // r2: initial map
-        // r3: new target
-        __ ldrb(r9, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-
-        __ Allocate(r9, r4, r9, r6, &rt_call, SIZE_IN_WORDS);
-
-        // Allocated the JSObject, now initialize the fields. Map is set to
-        // initial map and properties and elements are set to empty fixed array.
-        // r1: constructor function
-        // r2: initial map
-        // r3: new target
-        // r4: JSObject (not HeapObject tagged - the actual address).
-        // r9: start of next object
-        __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-        __ mov(r5, r4);
-        STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
-        __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-        STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
-        __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-        STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
-        __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-        STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ add(r4, r4, Operand(kHeapObjectTag));
-
-        // Fill all the in-object properties with the appropriate filler.
-        // r4: JSObject (tagged)
-        // r5: First in-object property of JSObject (not tagged)
-        __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          // Check if slack tracking is enabled.
-          MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
-          // Check if slack tracking is enabled.
-          __ ldr(r0, bit_field3);
-          __ DecodeField<Map::ConstructionCounter>(ip, r0);
-          // ip: slack tracking counter
-          __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
-          __ b(lt, &no_inobject_slack_tracking);
-          __ push(ip);  // Save allocation count value.
-          // Decrease generous allocation count.
-          __ sub(r0, r0, Operand(1 << Map::ConstructionCounter::kShift));
-          __ str(r0, bit_field3);
-
-          // Allocate object with a slack.
-          __ ldr(r0, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
-          __ Ubfx(r0, r0, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
-                  kBitsPerByte);
-          __ sub(r0, r9, Operand(r0, LSL, kPointerSizeLog2));
-          // r0: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ cmp(r5, r0);
-            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-          __ InitializeFieldsWithFiller(r5, r0, r6);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(r5, r9, r6);
-
-          __ pop(r0);  // Restore allocation count value before decreasing.
-          __ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
-          __ b(ne, &allocated);
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(r1, r3, r4, r2);
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ Pop(r1, r3, r4);
-
-          // Continue with JSObject being successfully allocated
-          // r1: constructor function
-          // r3: new target
-          // r4: JSObject
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(r5, r9, r6);
-
-        // Continue with JSObject being successfully allocated
-        // r1: constructor function
-        // r3: new target
-        // r4: JSObject
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // r1: constructor function
-      // r3: new target
-      __ bind(&rt_call);
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
+      // Allocate the new receiver object.
       __ Push(r1, r3);
-      __ Push(r1, r3);  // constructor function, new target
-      __ CallRuntime(Runtime::kNewObject);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
       __ mov(r4, r0);
       __ Pop(r1, r3);
 
-      // Receiver for constructor call allocated.
-      // r1: constructor function
-      // r3: new target
-      // r4: JSObject
-      __ bind(&allocated);
+      // ----------- S t a t e -------------
+      //  -- r1: constructor function
+      //  -- r3: new target
+      //  -- r4: newly allocated object
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
       __ ldr(r0, MemOperand(sp));
@@ -685,6 +660,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
   if (create_implicit_receiver) {
@@ -695,17 +683,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -854,10 +848,8 @@
 //   o sp: stack pointer
 //   o lr: return address
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -865,17 +857,19 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushFixedFrame(r1);
   __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-  __ push(r3);
-
-  // Push zero for bytecode array offset.
-  __ mov(r0, Operand(0));
-  __ push(r0);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
   __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(r0));
+  __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
+  __ cmp(debug_info, Operand(DebugInfo::uninitialized()));
+  // Load original bytecode array or the debug copy.
   __ ldr(kInterpreterBytecodeArrayRegister,
-         FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+         FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
+  __ ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex), ne);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -886,6 +880,10 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ mov(r0, Operand(0));
+  __ Push(r3, kInterpreterBytecodeArrayRegister, r0);
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -917,23 +915,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    __ cmp(sp, Operand(ip));
-    __ b(hs, &ok);
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -941,10 +925,9 @@
          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
   __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -955,6 +938,9 @@
   // and header removal.
   __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
 }
 
 
@@ -992,7 +978,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments (not including the receiver)
   //  -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1010,7 +997,9 @@
   Generate_InterpreterPushArgs(masm, r2, r3, r4);
 
   // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -1039,47 +1028,24 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(r1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use this for interpreter deopts).
-  __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ add(kInterpreterRegisterFileRegister, fp,
          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ ldr(kContextRegister,
          MemOperand(kInterpreterRegisterFileRegister,
                     InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ ldr(r1,
-         MemOperand(kInterpreterRegisterFileRegister,
-                    InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(kInterpreterBytecodeArrayRegister,
-         FieldMemOperand(r1, SharedFunctionInfo::kFunctionDataOffset));
+  __ ldr(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1107,6 +1073,29 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(r1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use this for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -1121,22 +1110,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1342,14 +1339,11 @@
 
   // Load the next prototype.
   __ bind(&next_prototype);
-  __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ b(eq, receiver_check_failed);
-  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ ldr(ip, FieldMemOperand(map, Map::kBitField3Offset));
-  __ tst(ip, Operand(Map::IsHiddenPrototype::kMask));
+  __ tst(ip, Operand(Map::HasHiddenPrototype::kMask));
   __ b(eq, receiver_check_failed);
+  __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ b(&prototype_loop_start);
 
@@ -1829,9 +1823,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ ldr(r2,
-           FieldMemOperand(r0, JSObject::kHeaderSize +
-                                   Heap::kArgumentsLengthIndex * kPointerSize));
+    __ ldr(r2, FieldMemOperand(r0, JSArgumentsObject::kLengthOffset));
     __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
     __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
     __ cmp(r2, ip);
@@ -1906,10 +1898,136 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ mov(scratch1, Operand(debug_is_active));
+  __ ldrb(scratch1, MemOperand(scratch1));
+  __ cmp(scratch1, Operand(0));
+  __ b(ne, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ b(ne, &no_interpreter_frame);
+    __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch3,
+         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &no_arguments_adaptor);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(fp, scratch2);
+  __ ldr(scratch1,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1,
+                         SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(scratch1);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of the destination area where we will put the
+  // arguments after we drop the current frame. We add kPointerSize to
+  // count the receiver, which is not included in the formal parameter count.
+  Register dst_reg = scratch2;
+  __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
+  __ add(dst_reg, dst_reg,
+         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = scratch1;
+  __ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
+  // Count receiver argument as well (not included in args_reg).
+  __ add(src_reg, src_reg, Operand(kPointerSize));
+
+  if (FLAG_debug_code) {
+    __ cmp(src_reg, dst_reg);
+    __ Check(lo, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch3;
+  Label loop, entry;
+  __ b(&entry);
+  __ bind(&loop);
+  __ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+  __ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ cmp(sp, src_reg);
+  __ b(ne, &loop);
+
+  // Leave current frame.
+  __ mov(sp, dst_reg);
+
+  __ bind(&done);
+}
+}  // namespace
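
For readers tracing the pointer arithmetic, the copy loop above is easier to
check in scalar form. Below is a minimal C++ model (illustrative names only,
not V8 API) with a plain array standing in for the downward-growing stack:
because the destination slots sit at higher addresses than the freshly pushed
arguments and the two ranges overlap, the copy must walk backwards from the
high end, exactly as the pre-decrement ldr/str pair does.

#include <cassert>
#include <cstdio>

// The stack grows down, so the receiver and arguments live at addresses >=
// sp. src_end and dst_end point one word past the ranges to read and write;
// copying backwards keeps the overlapping ranges safe.
long* SlideArgumentsUp(long* sp, long* src_end, long* dst_end) {
  long* src = src_end;
  long* dst = dst_end;
  while (src != sp) *--dst = *--src;  // mirrors the pre-decrement ldr/str pair
  return dst;  // the new sp once the dropped frame's slots are reclaimed
}

int main() {
  // slots[0] is the stack top: f()'s receiver and two arguments, followed by
  // the dead frame's slots that will be overwritten.
  long slots[8] = {/*recv*/ 9, /*arg1*/ 1, /*arg2*/ 2, 0, 0, 0, 0, 0};
  long* new_sp = SlideArgumentsUp(&slots[0], /*src_end=*/&slots[3],
                                  /*dst_end=*/&slots[7]);
  assert(new_sp == &slots[4]);
  assert(new_sp[0] == 9 && new_sp[1] == 1 && new_sp[2] == 2);
  std::printf("receiver and arguments preserved across the dropped frame\n");
}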
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments (not including the receiver)
   //  -- r1 : the function to call (checked to be a JSFunction)
@@ -1995,6 +2113,10 @@
   //  -- cp : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
   __ ldr(r2,
          FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SmiUntag(r2);
@@ -2093,13 +2215,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments (not including the receiver)
   //  -- r1 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(r1);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
   // Patch the receiver to [[BoundThis]].
   __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
   __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
@@ -2117,7 +2244,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments (not including the receiver)
   //  -- r1 : the target to call (can be any Object).
@@ -2127,14 +2255,25 @@
   __ JumpIfSmi(r1, &non_callable);
   __ bind(&non_smi);
   __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
-  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
   __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
-  __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsCallable));
+  __ b(eq, &non_callable);
+
   __ cmp(r5, Operand(JS_PROXY_TYPE));
   __ b(ne, &non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r0, r3, r4, r5);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ Push(r1);
   // Increase the arguments size to include the pushed function and the
@@ -2147,16 +2286,12 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
-  __ tst(r4, Operand(1 << Map::kIsCallable));
-  __ b(eq, &non_callable);
   // Overwrite the original receiver with the (original) target.
   __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 2141333..82fb51d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -92,9 +92,8 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cond, Strength strength);
+                                          Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
@@ -238,7 +237,7 @@
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cond, Strength strength) {
+                                          Condition cond) {
   Label not_identical;
   Label heap_number, return_equal;
   __ cmp(r0, r1);
@@ -258,14 +257,6 @@
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
     __ b(eq, slow);
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics, since
-      // we need to throw a TypeError. Smis have already been ruled out.
-      __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
-      __ b(eq, &return_equal);
-      __ tst(r4, Operand(kIsNotStringMask));
-      __ b(ne, slow);
-    }
   } else {
     __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
     __ b(eq, &heap_number);
@@ -279,13 +270,6 @@
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
       __ b(eq, slow);
-      if (is_strong(strength)) {
-        // Call the runtime on anything that is converted in the semantics,
-        // since we need to throw a TypeError. Smis and heap numbers have
-        // already been ruled out.
-        __ tst(r4, Operand(kIsNotStringMask));
-        __ b(ne, slow);
-      }
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -495,44 +479,52 @@
 
 // Fast negative check for internalized-to-internalized equality.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
-                                                     Register lhs,
-                                                     Register rhs,
+                                                     Register lhs, Register rhs,
                                                      Label* possible_strings,
-                                                     Label* not_both_strings) {
+                                                     Label* runtime_call) {
   DCHECK((lhs.is(r0) && rhs.is(r1)) ||
          (lhs.is(r1) && rhs.is(r0)));
 
   // r2 is object type of rhs.
-  Label object_test;
+  Label object_test, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ tst(r2, Operand(kIsNotStringMask));
   __ b(ne, &object_test);
   __ tst(r2, Operand(kIsNotInternalizedMask));
   __ b(ne, possible_strings);
   __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
-  __ b(ge, not_both_strings);
+  __ b(ge, runtime_call);
   __ tst(r3, Operand(kIsNotInternalizedMask));
   __ b(ne, possible_strings);
 
-  // Both are internalized.  We already checked they weren't the same pointer
-  // so they are not equal.
-  __ mov(r0, Operand(NOT_EQUAL));
+  // Both are internalized. We already checked they weren't the same pointer so
+  // they are not equal. Return non-equal by returning the non-zero object
+  // pointer in r0.
   __ Ret();
 
   __ bind(&object_test);
-  __ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ b(lt, not_both_strings);
-  __ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
-  __ b(lt, not_both_strings);
-  // If both objects are undetectable, they are equal. Otherwise, they
-  // are not equal, since they are different objects and an object is not
-  // equal to undefined.
+  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
   __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
-  __ and_(r0, r2, Operand(r3));
-  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
-  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+  __ ldrb(r4, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ ldrb(r5, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ tst(r4, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, &undetectable);
+  __ tst(r5, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, &return_unequal);
+
+  __ CompareInstanceType(r2, r2, FIRST_JS_RECEIVER_TYPE);
+  __ b(lt, runtime_call);
+  __ CompareInstanceType(r3, r3, FIRST_JS_RECEIVER_TYPE);
+  __ b(lt, runtime_call);
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in r0.
+  __ Ret();
+
+  __ bind(&undetectable);
+  __ tst(r5, Operand(1 << Map::kIsUndetectable));
+  __ b(eq, &return_unequal);
+  __ mov(r0, Operand(EQUAL));
   __ Ret();
 }
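
The rewritten object_test path encodes the abstract-equality rules for
undetectable objects (e.g. document.all): two undetectable objects compare
equal to each other, an undetectable object is unequal to any other object,
and two distinct JS receivers are unequal without a runtime call. A minimal
C++ model of that decision table, with illustrative names that are not V8 API:

#include <cassert>

enum class Cmp { kEqual, kUnequal, kRuntimeCall };

// Models the object_test branch for two distinct heap objects.
Cmp CompareDistinctHeapObjects(bool lhs_undetectable, bool rhs_undetectable,
                               bool lhs_is_receiver, bool rhs_is_receiver) {
  if (lhs_undetectable) {
    return rhs_undetectable ? Cmp::kEqual : Cmp::kUnequal;
  }
  if (rhs_undetectable) return Cmp::kUnequal;
  // Distinct JS receivers are never equal; anything else needs the full
  // runtime semantics.
  if (lhs_is_receiver && rhs_is_receiver) return Cmp::kUnequal;
  return Cmp::kRuntimeCall;
}

int main() {
  assert(CompareDistinctHeapObjects(true, true, false, false) == Cmp::kEqual);
  assert(CompareDistinctHeapObjects(true, false, false, true) == Cmp::kUnequal);
  assert(CompareDistinctHeapObjects(false, false, true, true) == Cmp::kUnequal);
  assert(CompareDistinctHeapObjects(false, false, true, false) ==
         Cmp::kRuntimeCall);
}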
 
@@ -583,7 +575,7 @@
 
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+  EmitIdenticalObjectComparison(masm, &slow, cc);
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
@@ -679,11 +671,19 @@
 
   __ bind(&slow);
 
-  __ Push(lhs, rhs);
-  // Figure out which native to call and setup the arguments.
   if (cc == eq) {
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(r1, Heap::kTrueValueRootIndex);
+    __ sub(r0, r0, r1);
+    __ Ret();
   } else {
+    __ Push(lhs, rhs);
     int ncr;  // NaN compare result
     if (cc == lt || cc == le) {
       ncr = GREATER;
@@ -696,8 +696,7 @@
 
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ bind(&miss);
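
The subtraction after the kEqual runtime call is worth spelling out: the
runtime returns the true or false heap object, while the stub's contract is
an integer result where EQUAL == 0. Subtracting the true root maps true to 0
and false to some non-zero value without a branch. A small sketch of the
idea, using stand-in addresses rather than real heap roots:

#include <cassert>
#include <cstdint>

int main() {
  // Stand-ins for the addresses of the true and false heap objects.
  char heap[16];
  std::intptr_t true_root = reinterpret_cast<std::intptr_t>(&heap[0]);
  std::intptr_t false_root = reinterpret_cast<std::intptr_t>(&heap[8]);

  // r0 holds the runtime's result; r0 -= true_root maps true -> 0 (EQUAL)
  // and false -> some non-zero value, with no branch needed.
  std::intptr_t r0 = true_root;
  assert(r0 - true_root == 0);
  r0 = false_root;
  assert(r0 - true_root != 0);
}
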
@@ -895,7 +894,6 @@
   __ vcvt_f64_s32(double_exponent, single_scratch);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -909,7 +907,6 @@
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     DCHECK(heapnumber.is(r0));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret(2);
   } else {
     __ push(lr);
@@ -925,7 +922,6 @@
     __ MovFromFloatResult(double_result);
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret();
   }
 }
@@ -999,11 +995,9 @@
   // r1: pointer to the first argument (C callee-saved)
   // r5: pointer to builtin function  (C callee-saved)
 
-  // Result returned in r0 or r0+r1 by default.
-
-#if V8_HOST_ARCH_ARM
   int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
+#if V8_HOST_ARCH_ARM
   if (FLAG_debug_code) {
     if (frame_alignment > kPointerSize) {
       Label alignment_as_expected;
@@ -1018,8 +1012,25 @@
 #endif
 
   // Call C built-in.
-  // r0 = argc, r1 = argv
-  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+  int result_stack_size;
+  if (result_size() <= 2) {
+    // r0 = argc, r1 = argv, r2 = isolate
+    __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+    result_stack_size = 0;
+  } else {
+    DCHECK_EQ(3, result_size());
+    // Allocate additional space for the result.
+    result_stack_size =
+        ((result_size() * kPointerSize) + frame_alignment_mask) &
+        ~frame_alignment_mask;
+    __ sub(sp, sp, Operand(result_stack_size));
+
+    // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
+    __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+    __ mov(r2, Operand(r1));
+    __ mov(r1, Operand(r0));
+    __ mov(r0, Operand(sp));
+  }
 
   // To let the GC traverse the return address of the exit frames, we need to
   // know where the return address is. The CEntryStub is unmovable, so
@@ -1032,11 +1043,19 @@
     // Prevent literal pool emission before return address.
     Assembler::BlockConstPoolScope block_const_pool(masm);
     __ add(lr, pc, Operand(4));
-    __ str(lr, MemOperand(sp, 0));
+    __ str(lr, MemOperand(sp, result_stack_size));
     __ Call(r5);
   }
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+    // Read result values stored on stack.
+    __ ldr(r2, MemOperand(r0, 2 * kPointerSize));
+    __ ldr(r1, MemOperand(r0, 1 * kPointerSize));
+    __ ldr(r0, MemOperand(r0, 0 * kPointerSize));
+  }
+  // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
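
The result_size() == 3 path mirrors how, under the ARM AAPCS, a C function
returning a composite too large for registers receives a hidden pointer to
caller-reserved memory as its first argument, shifting the visible arguments
one register down (the r0..r3 shuffle above). A hedged C++ sketch of the
equivalent source-level shape; the Triple type and function names here are
hypothetical:

#include <cassert>
#include <cstdio>

// A three-word result; the type and function names are illustrative.
struct Triple { void* x; void* y; void* z; };

// On 32-bit ARM this signature is lowered roughly to
//   void fn(Triple* hidden_result, int argc, void** argv, void* isolate);
// the same r0..r3 shuffle the stub performs before calling out.
Triple MakeTriple(int argc, void** argv, void* isolate) {
  (void)argc;
  return Triple{argv[0], isolate, nullptr};
}

int main() {
  int dummy = 0;
  void* argv[] = {&dummy};
  Triple t = MakeTriple(1, argv, nullptr);
  assert(t.x == &dummy && t.z == nullptr);
  std::puts("three-word result returned via a hidden pointer argument");
}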
 
-  __ VFPEnsureFPSCRState(r2);
+  __ VFPEnsureFPSCRState(r3);
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -1049,9 +1068,9 @@
     Label okay;
     ExternalReference pending_exception_address(
         Isolate::kPendingExceptionAddress, isolate());
-    __ mov(r2, Operand(pending_exception_address));
-    __ ldr(r2, MemOperand(r2));
-    __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+    __ mov(r3, Operand(pending_exception_address));
+    __ ldr(r3, MemOperand(r3));
+    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
     // Cannot use check here as it attempts to generate call into runtime.
     __ b(eq, &okay);
     __ stop("Unexpected pending exception");
@@ -1461,286 +1480,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The displacement is the offset of the last parameter (if any)
-  // relative to the frame pointer.
-  const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(r1, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adaptor);
-
-  // Check index against formal parameters count limit passed in
-  // through register r0. Use unsigned comparison to get negative
-  // check for free.
-  __ cmp(r1, r0);
-  __ b(hs, &slow);
-
-  // Read the argument from the stack and return it.
-  __ sub(r3, r0, r1);
-  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
-  __ ldr(r0, MemOperand(r3, kDisplacement));
-  __ Jump(lr);
-
-  // Arguments adaptor case: Check index against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(r1, r0);
-  __ b(cs, &slow);
-
-  // Read the argument from the adaptor frame and return it.
-  __ sub(r3, r0, r1);
-  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ ldr(r0, MemOperand(r3, kDisplacement));
-  __ Jump(lr);
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ push(r1);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // r1 : function
-  // r2 : number of parameters (tagged)
-  // r3 : parameters pointer
-
-  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
-  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &runtime);
-
-  // Patch the arguments.length and the parameters pointer in the current frame.
-  __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ add(r4, r4, Operand(r2, LSL, 1));
-  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ Push(r1, r3, r2);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // r1 : function
-  // r2 : number of parameters (tagged)
-  // r3 : parameters pointer
-  // Registers used over whole function:
-  //  r5 : arguments count (tagged)
-  //  r6 : mapped parameter count (tagged)
-
-  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
-  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ mov(r5, r2);
-  __ mov(r6, r2);
-  __ b(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ add(r4, r4, Operand(r5, LSL, 1));
-  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r5 = argument count (tagged)
-  // r6 = parameter count (tagged)
-  // Compute the mapped parameter count = min(r6, r5) in r6.
-  __ mov(r6, r2);
-  __ cmp(r6, Operand(r5));
-  __ mov(r6, Operand(r5), LeaveCC, gt);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  __ cmp(r6, Operand(Smi::FromInt(0)));
-  __ mov(r9, Operand::Zero(), LeaveCC, eq);
-  __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
-  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
-  // 2. Backing store.
-  __ add(r9, r9, Operand(r5, LSL, 1));
-  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
-
-  // r0 = address of new object(s) (tagged)
-  // r2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into r4.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ ldr(r4, NativeContextMemOperand());
-  __ cmp(r6, Operand::Zero());
-  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
-  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
-  // r0 = address of new object (tagged)
-  // r2 = argument count (smi-tagged)
-  // r4 = address of arguments map (tagged)
-  // r6 = mapped parameter count (tagged)
-  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
-  __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-  __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ AssertNotSmi(r1);
-  const int kCalleeOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsCalleeIndex * kPointerSize;
-  __ str(r1, FieldMemOperand(r0, kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(r5);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsLengthIndex * kPointerSize;
-  __ str(r5, FieldMemOperand(r0, kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, r4 will point there, otherwise
-  // it will point to the backing store.
-  __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
-  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
-  // r0 = address of new object (tagged)
-  // r2 = argument count (tagged)
-  // r4 = address of parameter map or backing store (tagged)
-  // r6 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ cmp(r6, Operand(Smi::FromInt(0)));
-  // Move backing store address to r1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(r1, r4, LeaveCC, eq);
-  __ b(eq, &skip_parameter_map);
-
-  __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
-  __ add(r5, r6, Operand(Smi::FromInt(2)));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ add(r5, r4, Operand(r6, LSL, 1));
-  __ add(r5, r5, Operand(kParameterMapHeaderSize));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(r5, r6);
-  __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ sub(r9, r9, Operand(r6));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ add(r1, r4, Operand(r5, LSL, 1));
-  __ add(r1, r1, Operand(kParameterMapHeaderSize));
-
-  // r1 = address of backing store (tagged)
-  // r4 = address of parameter map (tagged), which is also the address of new
-  //      object + Heap::kSloppyArgumentsObjectSize (tagged)
-  // r0 = temporary scratch (a.o., for address calculation)
-  // r5 = loop variable (tagged)
-  // ip = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ sub(r5, r5, Operand(Smi::FromInt(1)));
-  __ mov(r0, Operand(r5, LSL, 1));
-  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ str(r9, MemOperand(r4, r0));
-  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ str(ip, MemOperand(r1, r0));
-  __ add(r9, r9, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ cmp(r5, Operand(Smi::FromInt(0)));
-  __ b(ne, &parameters_loop);
-
-  // Restore r0 = new object (tagged) and r5 = argument count (tagged).
-  __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
-  __ ldr(r5, FieldMemOperand(r0, kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // r0 = address of new object (tagged)
-  // r1 = address of backing store (tagged)
-  // r5 = argument count (tagged)
-  // r6 = mapped parameter count (tagged)
-  // r9 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
-  __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
-  __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ sub(r3, r3, Operand(r6, LSL, 1));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ sub(r3, r3, Operand(kPointerSize));
-  __ ldr(r4, MemOperand(r3, 0));
-  __ add(r9, r1, Operand(r6, LSL, 1));
-  __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
-  __ add(r6, r6, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(r6, Operand(r5));
-  __ b(lt, &arguments_loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r0 = address of new object (tagged)
-  // r5 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(r1, r3, r5);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is in lr.
   Label slow;
@@ -1764,117 +1503,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // r1 : function
-  // r2 : number of parameters (tagged)
-  // r3 : parameters pointer
-
-  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
-  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &try_allocate);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ add(r4, r4, Operand::PointerOffsetFromSmiKey(r2));
-  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size
-  // of the arguments object and the elements array in words.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ SmiUntag(r9, r2, SetCC);
-  __ b(eq, &add_arguments_object);
-  __ add(r9, r9, Operand(FixedArray::kHeaderSize / kPointerSize));
-  __ bind(&add_arguments_object);
-  __ add(r9, r9, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(r9, r0, r4, r5, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
-
-  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
-  __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r5, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-  __ str(r5, FieldMemOperand(r0, JSObject::kElementsOffset));
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(r2);
-  __ str(r2,
-         FieldMemOperand(r0, JSObject::kHeaderSize +
-                                 Heap::kArgumentsLengthIndex * kPointerSize));
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ cmp(r2, Operand::Zero());
-  __ b(eq, &done);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
-  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
-  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
-  __ str(r2, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  __ SmiUntag(r2);
-
-  // Copy the fixed array slots.
-  Label loop;
-  // Set up r4 to point to the first array slot.
-  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ bind(&loop);
-  // Pre-decrement r3 with kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ ldr(r5, MemOperand(r3, kPointerSize, NegPreIndex));
-  // Post-increment r4 with kPointerSize on each iteration.
-  __ str(r5, MemOperand(r4, kPointerSize, PostIndex));
-  __ sub(r2, r2, Operand(1));
-  __ cmp(r2, Operand::Zero());
-  __ b(ne, &loop);
-
-  // Return.
-  __ bind(&done);
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(r1, r3, r2);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // r2 : number of parameters (tagged)
-  // r3 : parameters pointer
-  // r4 : rest parameter index (tagged)
-
-  Label runtime;
-  __ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
-  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
-  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ Push(r2, r3, r4);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1953,34 +1581,33 @@
   __ ldr(subject, MemOperand(sp, kSubjectOffset));
   __ JumpIfSmi(subject, &runtime);
   __ mov(r3, subject);  // Make a copy of the original subject string.
-  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // subject: subject string
   // r3: subject string
-  // r0: subject string instance type
   // regexp_data: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
-  // (1) Sequential string?  If yes, go to (5).
-  // (2) Anything but sequential or cons?  If yes, go to (6).
-  // (3) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (4) Is subject external?  If yes, go to (7).
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (6) Not a long external string?  If yes, go to (8).
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
-  //     Go to (5).
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
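
Steps (1)-(8) form a small unwrapping loop: cons and sliced strings are
repeatedly replaced by their underlying string until something sequential
(or an external string made to look sequential) is reached, and everything
else bails out to the runtime. A hedged C++ model of that loop with a toy
string representation, not V8's:

#include <cassert>

enum class Kind { kSeq, kCons, kSliced, kExternal, kOther };

struct Str {
  Kind kind;
  Str* underlying;  // first string of a flat cons / parent of a slice
};

// Returns the string the regexp code will actually run over, or nullptr to
// bail out to the runtime. Follows the step numbers in the plan above; cons
// strings are assumed flat here (the stub bails out to runtime otherwise).
Str* UnwrapSubject(Str* subject) {
  for (;;) {
    switch (subject->kind) {  // (1) re-examine the instance type
      case Kind::kSeq:
        return subject;                 // (4) sequential: load regexp code
      case Kind::kCons:
        subject = subject->underlying;  // (3) take the first string, go to (1)
        break;
      case Kind::kSliced:
        subject = subject->underlying;  // (8) take the parent, go to (1)
        break;
      case Kind::kExternal:
        return subject;                 // (6) made to look sequential -> (4)
      default:
        return nullptr;                 // (7) bail out to the runtime
    }
  }
}

int main() {
  Str seq{Kind::kSeq, nullptr};
  Str slice{Kind::kSliced, &seq};
  Str cons{Kind::kCons, &slice};
  assert(UnwrapSubject(&cons) == &seq);  // cons -> slice -> sequential parent
}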
 
-  Label seq_string /* 5 */, external_string /* 7 */,
-        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
-        not_long_external /* 8 */;
+  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
 
-  // (1) Sequential string?  If yes, go to (5).
+  __ bind(&check_underlying);
+  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
   __ and_(r1,
           r0,
           Operand(kIsNotStringMask |
@@ -1988,15 +1615,15 @@
                   kShortExternalStringMask),
           SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
-  __ b(eq, &seq_string);  // Go to (5).
+  __ b(eq, &seq_string);  // Go to (4).
 
-  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(r1, Operand(kExternalStringTag));
-  __ b(ge, &not_seq_nor_cons);  // Go to (6).
+  __ b(ge, &not_seq_nor_cons);  // Go to (5).
 
   // (3) Cons string.  Check that it's flat.
   // Replace subject with first string and reload instance type.
@@ -2004,19 +1631,9 @@
   __ CompareRoot(r0, Heap::kempty_stringRootIndex);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ jmp(&check_underlying);
 
-  // (4) Is subject external?  If yes, go to (7).
-  __ bind(&check_underlying);
-  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(r0, Operand(kStringRepresentationMask));
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ b(ne, &external_string);  // Go to (7).
-
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (4) Sequential string.  Load regexp code according to encoding.
   __ bind(&seq_string);
   // subject: sequential subject string (or look-alike, external string)
   // r3: original subject string
@@ -2249,12 +1866,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (6) Not a long external string?  If yes, go to (8).
+  // (5) Long external string?  If not, go to (7).
   __ bind(&not_seq_nor_cons);
   // Compare flags are still set.
-  __ b(gt, &not_long_external);  // Go to (8).
+  __ b(gt, &not_long_external);  // Go to (7).
 
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
   __ bind(&external_string);
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
@@ -2271,15 +1888,15 @@
   __ sub(subject,
          subject,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ jmp(&seq_string);    // Go to (5).
+  __ jmp(&seq_string);  // Go to (4).
 
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
   __ bind(&not_long_external);
   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ b(ne, &runtime);
 
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
   // Load offset into r9 and replace subject string with parent.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(r9);
@@ -2506,7 +2123,8 @@
 
   __ bind(&call_function);
   __ mov(r0, Operand(argc));
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -2545,7 +2163,7 @@
 
   __ bind(&call);
   __ mov(r0, Operand(argc));
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -3096,6 +2714,37 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in r0.
+  Label is_number;
+  __ JumpIfSmi(r0, &is_number);
+
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Ret(ls);
+
+  Label not_heap_number;
+  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+  __ b(ne, &not_heap_number);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ cmp(r1, Operand(ODDBALL_TYPE));
+  __ b(ne, &not_oddball);
+  __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ push(r0);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
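
ToNameStub is a straight type dispatch: names pass through unchanged, numbers
(Smis and heap numbers) are routed to number-to-string, oddballs return their
cached string, and anything else falls back to Runtime::kToName. A minimal
sketch of the same decision order, with an illustrative enum rather than
V8's instance types:

#include <cassert>
#include <string>

enum class Type { kSmi, kString, kSymbol, kHeapNumber, kOddball, kOther };

// Collapses the stub's branches into one dispatch (illustrative only).
std::string ToNameAction(Type t) {
  if (t == Type::kSmi || t == Type::kHeapNumber) return "NumberToString";
  if (t == Type::kString || t == Type::kSymbol) return "already a name";
  if (t == Type::kOddball) return "load cached to_string";
  return "Runtime::kToName";
}

int main() {
  assert(ToNameAction(Type::kSmi) == "NumberToString");
  assert(ToNameAction(Type::kSymbol) == "already a name");
  assert(ToNameAction(Type::kOther) == "Runtime::kToName");
}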
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3263,18 +2912,14 @@
 
   __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
-      __ AssertSmi(r1);
-      __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
-      __ AssertSmi(r0);
-    }
-    __ sub(r0, r1, r0);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
+    __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
+    __ AssertSmi(r1);
+    __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
+    __ AssertSmi(r0);
   }
+  __ sub(r0, r1, r0);
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
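
The boolean fast path above relies on booleans being Oddballs whose to_number
field caches Smi 0 or 1; for relational ops the stub loads those cached
numbers, and the final subtraction yields the usual negative/zero/positive
comparison result. A hedged scalar model, with tagging elided:

#include <cassert>

// Model only: lhs_to_number/rhs_to_number stand in for the cached Smis.
int CompareBooleans(bool lhs, bool rhs) {
  int lhs_to_number = lhs ? 1 : 0;
  int rhs_to_number = rhs ? 1 : 0;
  return lhs_to_number - rhs_to_number;
}

int main() {
  assert(CompareBooleans(true, true) == 0);
  assert(CompareBooleans(false, true) < 0);
  assert(CompareBooleans(true, false) > 0);
}
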
@@ -3354,7 +2999,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3572,8 +3217,6 @@
   if (Token::IsEqualityOp(op())) {
     __ sub(r0, r0, Operand(r1));
     __ Ret();
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     if (op() == Token::LT || op() == Token::LTE) {
       __ mov(r2, Operand(Smi::FromInt(GREATER)));
@@ -3939,11 +3582,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     ne,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -4918,6 +4558,584 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : target
+  //  -- r3 : new target
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r1);
+  __ AssertReceiver(r3);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(r2, &new_object);
+  __ CompareObjectType(r2, r0, r0, MAP_TYPE);
+  __ b(ne, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
+  __ cmp(r0, r1);
+  __ b(ne, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+  __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ str(r2, MemOperand(r0, JSObject::kMapOffset));
+  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+  __ str(r3, MemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r3, MemOperand(r0, JSObject::kElementsOffset));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ add(r1, r0, Operand(JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- r0 : result (untagged)
+  //  -- r1 : result fields (untagged)
+  //  -- r5 : result end (untagged)
+  //  -- r2 : initial map
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
+  __ tst(r3, Operand(Map::ConstructionCounter::kMask));
+  __ b(ne, &slack_tracking);
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(r1, r5, r6);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ add(r0, r0, Operand(kHeapObjectTag));
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
+    __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+    __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
+    __ InitializeFieldsWithFiller(r1, r4, r6);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(r1, r5, r6);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ add(r0, r0, Operand(kHeapObjectTag));
+
+    // Check if we can finalize the instance size.
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ tst(r3, Operand(Map::ConstructionCounter::kMask));
+    __ Ret(ne);
+
+    // Finalize the instance size.
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r2);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(r0);
+    }
+    __ Ret();
+  }
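
The slack_tracking block implements in-object slack tracking: a counter in
the map's bit field 3 is decremented on each allocation while tracking is
active, and when it runs out the instance size is finalized so unused
reserved fields are given back. A hedged C++ model of the counter mechanics
(field layout and constants illustrative, not V8's):

#include <cassert>

struct MapModel {
  int construction_counter;  // 0 means slack tracking is not active
  int instance_size_in_words;
  int unused_fields;
};

// Called once per allocation; returns true when this allocation finalized
// the instance size, mirroring the slack_tracking block above.
bool AllocateWithSlackTracking(MapModel* map) {
  if (map->construction_counter == 0) return false;  // fast path: no tracking
  --map->construction_counter;
  if (map->construction_counter == 0) {
    // FinalizeInstanceSize: give back the reserved fields nobody used.
    map->instance_size_in_words -= map->unused_fields;
    map->unused_fields = 0;
    return true;
  }
  return false;
}

int main() {
  MapModel map{/*counter*/ 2, /*size*/ 10, /*unused*/ 3};
  assert(!AllocateWithSlackTracking(&map));  // counter 2 -> 1, keep tracking
  assert(AllocateWithSlackTracking(&map));   // counter 1 -> 0, finalize
  assert(map.instance_size_in_words == 7);
}
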
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
+    __ Push(r2, r4);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(r2);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ sub(r0, r0, Operand(kHeapObjectTag));
+  __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+  __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
+  __ b(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ Push(r1, r3);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
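
One detail in the allocate fallback deserves a note: mov(r4, Operand(r4, LSL,
kPointerSizeLog2 + 1)) converts the instance size from words to bytes and
applies the Smi tag in a single shift, which is what the preceding
STATIC_ASSERTs on kSmiTag and kSmiTagSize license. A tiny sketch under 32-bit
ARM assumptions:

#include <cassert>
#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // assumes 32-bit ARM: 4-byte pointers
constexpr int kSmiTagSize = 1;       // low bit is the Smi tag

// (words << kPointerSizeLog2) << kSmiTagSize == words << (log2 + 1):
// words -> bytes and Smi tagging in one shift.
std::int32_t SmiTaggedByteSize(std::int32_t size_in_words) {
  return size_in_words << (kPointerSizeLog2 + kSmiTagSize);
}

int main() {
  assert(SmiTaggedByteSize(5) == 40);  // 5 words = 20 bytes; Smi(20) is 40
}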
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mov(r2, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+    __ cmp(ip, r1);
+    __ b(ne, &loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &no_rest_parameters);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r1,
+         FieldMemOperand(r1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sub(r0, r0, r1, SetCC);
+  __ b(gt, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in r0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
+    __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
+    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+    __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
+    __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
+    __ mov(r1, Operand(0));
+    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
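
The rest-parameter arithmetic above reduces to: the rest array receives only
the arguments beyond the declared formals, and such extras can only exist
when an arguments adaptor frame is present. A one-line model with worked
checks, tagging and frame walking elided:

#include <algorithm>
#include <cassert>

// Entries in the rest array for a function with `formal` declared parameters
// invoked with `actual` arguments.
int RestCount(int actual, int formal) { return std::max(0, actual - formal); }

int main() {
  assert(RestCount(5, 2) == 3);  // f(a, b, ...r) called with 5 args: r has 3
  assert(RestCount(2, 2) == 0);  // exact application: empty rest array
  assert(RestCount(1, 2) == 0);  // under-application: empty rest array
}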
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ add(r2, r2,
+           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- r0 : number of rest parameters (tagged)
+    //  -- r2 : pointer to the first rest parameter
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the elements array in r3.
+    __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+    __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
+    __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
+    __ add(r4, r3, Operand(FixedArray::kHeaderSize));
+    {
+      Label loop, done_loop;
+      __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
+      __ bind(&loop);
+      __ cmp(r4, r1);
+      __ b(eq, &done_loop);
+      __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
+      __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
+      __ add(r4, r4, Operand(1 * kPointerSize));
+      __ b(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Setup the rest parameter array in r4.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
+    __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
+    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+    __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
+    __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
+    __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ mov(r0, r4);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r1);
+      __ Push(r0, r2, r1);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ mov(r3, r0);
+      __ Pop(r0, r2);
+    }
+    __ jmp(&done_allocate);
+  }
+}
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2,
+         FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ add(r3, fp, Operand(r2, LSL, kPointerSizeLog2 - 1));
+  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r1 : function
+  // r2 : number of parameters (tagged)
+  // r3 : parameters pointer
+  // Registers used over whole function:
+  //  r5 : arguments count (tagged)
+  //  r6 : mapped parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ mov(r5, r2);
+  __ mov(r6, r2);
+  __ b(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ add(r4, r4, Operand(r5, LSL, 1));
+  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r5 = argument count (tagged)
+  // r6 = parameter count (tagged)
+  // Compute the mapped parameter count = min(r6, r5) in r6.
+  __ mov(r6, r2);
+  __ cmp(r6, Operand(r5));
+  __ mov(r6, Operand(r5), LeaveCC, gt);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  __ cmp(r6, Operand(Smi::FromInt(0)));
+  __ mov(r9, Operand::Zero(), LeaveCC, eq);
+  __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
+  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
+
+  // 2. Backing store.
+  __ add(r9, r9, Operand(r5, LSL, 1));
+  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
+
+  // r0 = address of new object(s) (tagged)
+  // r2 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into r4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ ldr(r4, NativeContextMemOperand());
+  __ cmp(r6, Operand::Zero());
+  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+  // r0 = address of new object (tagged)
+  // r2 = argument count (smi-tagged)
+  // r4 = address of arguments map (tagged)
+  // r6 = mapped parameter count (tagged)
+  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+  __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+  __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(r1);
+  __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(r5);
+  __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, r4 will point there, otherwise
+  // it will point to the backing store.
+  __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
+  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+  // r0 = address of new object (tagged)
+  // r2 = argument count (tagged)
+  // r4 = address of parameter map or backing store (tagged)
+  // r6 = mapped parameter count (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ cmp(r6, Operand(Smi::FromInt(0)));
+  // Move backing store address to r1, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(r1, r4, LeaveCC, eq);
+  __ b(eq, &skip_parameter_map);
+
+  __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
+  __ add(r5, r6, Operand(Smi::FromInt(2)));
+  __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ add(r5, r4, Operand(r6, LSL, 1));
+  __ add(r5, r5, Operand(kParameterMapHeaderSize));
+  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1.
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(r5, r6);
+  __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ sub(r9, r9, Operand(r6));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ add(r1, r4, Operand(r5, LSL, 1));
+  __ add(r1, r1, Operand(kParameterMapHeaderSize));
+
+  // r1 = address of backing store (tagged)
+  // r4 = address of parameter map (tagged), which is also the address of new
+  //      object + JSSloppyArgumentsObject::kSize (tagged)
+  // r0 = temporary scratch (among other things, for address calculation)
+  // r5 = loop variable (tagged)
+  // ip = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ sub(r5, r5, Operand(Smi::FromInt(1)));
+  __ mov(r0, Operand(r5, LSL, 1));
+  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ str(r9, MemOperand(r4, r0));
+  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ str(ip, MemOperand(r1, r0));
+  __ add(r9, r9, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ cmp(r5, Operand(Smi::FromInt(0)));
+  __ b(ne, &parameters_loop);
+
+  // Restore r0 = new object (tagged) and r5 = argument count (tagged).
+  __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
+  __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
+
+  __ bind(&skip_parameter_map);
+  // r0 = address of new object (tagged)
+  // r1 = address of backing store (tagged)
+  // r5 = argument count (tagged)
+  // r6 = mapped parameter count (tagged)
+  // r9 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+  __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
+  __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ sub(r3, r3, Operand(r6, LSL, 1));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ sub(r3, r3, Operand(kPointerSize));
+  __ ldr(r4, MemOperand(r3, 0));
+  __ add(r9, r1, Operand(r6, LSL, 1));
+  __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
+  __ add(r6, r6, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ cmp(r6, Operand(r5));
+  __ b(lt, &arguments_loop);
+
+  // Return.
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // r0 = address of new object (tagged)
+  // r5 = argument count (tagged)
+  __ bind(&runtime);
+  __ Push(r1, r3, r5);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
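For reference, the three-part allocation in the sloppy-arguments stub above amounts to the byte count below. This is an illustrative host-side C++ sketch, assuming 32-bit ARM smi tagging (one tag bit, 4-byte pointers), where a smi-tagged count shifted left by one equals count * kPointerSize:

  // Sketch only; mirrors the stub's size arithmetic, not code from the patch.
  int SloppyArgumentsAllocationSize(int mapped_count, int arg_count) {
    const int kParameterMapHeaderSize =
        FixedArray::kHeaderSize + 2 * kPointerSize;  // context + backing store
    int size = (mapped_count == 0)
                   ? 0
                   : kParameterMapHeaderSize + mapped_count * kPointerSize;
    size += FixedArray::kHeaderSize + arg_count * kPointerSize;  // elements
    size += JSSloppyArgumentsObject::kSize;  // the arguments object itself
    return size;
  }
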
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mov(r2, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+    __ cmp(ip, r1);
+    __ b(ne, &loop);
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
+  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &arguments_adaptor);
+  {
+    __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r0, FieldMemOperand(
+                   r1, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ add(r2, r2,
+           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ add(r2, r2,
+           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- r0 : number of rest parameters (tagged)
+  //  -- r2 : pointer to first rest parameters
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
+  __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Set up the elements array in r3.
+  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+  __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
+  __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  __ add(r4, r3, Operand(FixedArray::kHeaderSize));
+  {
+    Label loop, done_loop;
+    __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ bind(&loop);
+    __ cmp(r4, r1);
+    __ b(eq, &done_loop);
+    __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
+    __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
+    __ add(r4, r4, Operand(1 * kPointerSize));
+    __ b(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Set up the strict arguments object in r4.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
+  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
+  __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
+  __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ mov(r0, r4);
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(r1);
+    __ Push(r0, r2, r1);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ mov(r3, r0);
+    __ Pop(r0, r2);
+  }
+  __ b(&done_allocate);
+}
+
+
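The strict-mode stub above needs no parameter map, so the size it computes into r1 reduces to a fixed header plus one pointer per argument; under the same smi-tagging assumption as before, roughly:

  // Sketch of the allocation size computed in r1 above.
  int size = JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize +
             arg_count * kPointerSize;
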
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context = cp;
   Register result = r0;
@@ -5206,11 +5424,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- r0                  : callee
   //  -- r4                  : call_data
@@ -5246,8 +5463,10 @@
 
   // context save
   __ push(context);
-  // load context from callee
-  __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // load context from callee
+    __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
 
   // callee
   __ push(callee);
@@ -5339,7 +5558,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5347,41 +5566,47 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- sp[0]                  : name
-  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- sp[0]                        : name
+  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- r2                     : api_function_address
+  //  -- r2                           : api_function_address
   // -----------------------------------
 
   Register api_function_address = ApiGetterDescriptor::function_address();
   DCHECK(api_function_address.is(r2));
 
-  __ mov(r0, sp);  // r0 = Handle<Name>
-  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = PCA
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+  __ mov(r0, sp);                             // r0 = Handle<Name>
+  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = v8::PCI::args_
 
   const int kApiStackSpace = 1;
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // r1 (internal::Object** args_) as the data.
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
   __ str(r1, MemOperand(sp, 1 * kPointerSize));
-  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
+
+  // +3 is to skip the prologue, the return address and the name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           kStackUnwindSpace, NULL,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           kStackUnwindSpace, NULL, return_value_operand, NULL);
 }
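The computed return_value_operand replaces the old hard-coded MemOperand(fp, 6 * kPointerSize); the two agree only if kReturnValueOffset was 3 at this revision. A hypothetical compile-time check (not part of the patch) would make that dependency explicit:

  // Hypothetical; the +3 skips the exit-frame prologue, the return address
  // and the name handle, as the comment above explains.
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset + 3 == 6);
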
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index c34acd6..2dee363 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -108,23 +108,23 @@
     __ b(lt, &size_less_than_8);
     __ cmp(chars, Operand(32));
     __ b(lt, &less_32);
-    if (CpuFeatures::cache_line_size() == 32) {
+    if (CpuFeatures::dcache_line_size() == 32) {
       __ pld(MemOperand(src, 32));
     }
     __ cmp(chars, Operand(64));
     __ b(lt, &less_64);
     __ pld(MemOperand(src, 64));
-    if (CpuFeatures::cache_line_size() == 32) {
+    if (CpuFeatures::dcache_line_size() == 32) {
       __ pld(MemOperand(src, 96));
     }
     __ cmp(chars, Operand(128));
     __ b(lt, &less_128);
     __ pld(MemOperand(src, 128));
-    if (CpuFeatures::cache_line_size() == 32) {
+    if (CpuFeatures::dcache_line_size() == 32) {
       __ pld(MemOperand(src, 160));
     }
     __ pld(MemOperand(src, 192));
-    if (CpuFeatures::cache_line_size() == 32) {
+    if (CpuFeatures::dcache_line_size() == 32) {
       __ pld(MemOperand(src, 224));
     }
     __ cmp(chars, Operand(256));
@@ -134,7 +134,7 @@
     __ bind(&loop);
     __ pld(MemOperand(src, 256));
     __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
-    if (CpuFeatures::cache_line_size() == 32) {
+    if (CpuFeatures::dcache_line_size() == 32) {
       __ pld(MemOperand(src, 256));
     }
     __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
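The rename to dcache_line_size() makes explicit that the prefetch schedule keys off the data cache. The pld pattern above reduces to: every 64-byte offset unconditionally, plus the intervening 32-byte offsets when the D-cache line is 32 bytes. As an illustrative host-side sketch:

  // Sketch only; mirrors the pld offsets 32..224 emitted above.
  for (int offset = 32; offset <= 224; offset += 32) {
    if (offset % 64 == 0 || CpuFeatures::dcache_line_size() == 32) {
      // __ pld(MemOperand(src, offset));
    }
  }
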
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index efc060a..b9d4788 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -219,6 +219,22 @@
 };
 
 
+enum BarrierOption {
+  OSHLD = 0x1,
+  OSHST = 0x2,
+  OSH = 0x3,
+  NSHLD = 0x5,
+  NSHST = 0x6,
+  NSH = 0x7,
+  ISHLD = 0x9,
+  ISHST = 0xa,
+  ISH = 0xb,
+  LD = 0xd,
+  ST = 0xe,
+  SY = 0xf,
+};
+
+
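These enumerators are the literal 4-bit option field of the ARM DMB/DSB/ISB encodings (SY = 0b1111, ISH = 0b1011, and so on), so an emitter can splice them straight into bits 3:0 of the instruction word; as a sketch:

  // Sketch: place the barrier option into an encoding's low nibble.
  uint32_t EncodeBarrierOption(uint32_t instr, BarrierOption option) {
    return (instr & ~0xfu) | (static_cast<uint32_t>(option) & 0xfu);
  }
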
 // -----------------------------------------------------------------------------
 // Addressing modes and instruction variants.
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 38635ea..3e9fac7 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -85,27 +85,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
@@ -124,8 +103,7 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on ARM in the input frame.
   return false;
 }
@@ -288,14 +266,11 @@
   __ CheckFor32DRegs(ip);
 
   __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
-  int src_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
-    if (i == kDoubleRegZero.code()) continue;
-    if (i == kScratchDoubleReg.code()) continue;
-
-    const DwVfpRegister reg = DwVfpRegister::from_code(i);
-    __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
-    src_offset += kDoubleSize;
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    DwVfpRegister reg = DwVfpRegister::from_code(code);
+    int src_offset = code * kDoubleSize + double_regs_offset;
+    __ vldr(reg, r1, src_offset);
   }
 
   // Push state, pc, and continuation from the last output frame.
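Restoring only the allocatable double registers lets each slot be addressed by register code instead of walking every register; the per-register offset (double_regs_offset is defined earlier in this function, outside the hunk) is simply:

  // Sketch of the slot computation used in the loop above.
  int SavedDoubleSlot(int code, int double_regs_offset) {
    return code * kDoubleSize + double_regs_offset;
  }
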
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 66b7f45..9258703 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1188,7 +1188,13 @@
                   }
                 }
               } else {
-                UNREACHABLE();
+                // PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
+                if ((instr->Bits(20, 16) == 0x1f) &&
+                    (instr->Bits(11, 4) == 0xf3)) {
+                  Format(instr, "rbit'cond 'rd, 'rm");
+                } else {
+                  UNREACHABLE();
+                }
               }
               break;
           }
@@ -1689,6 +1695,12 @@
 }
 
 
+static const char* const barrier_option_names[] = {
+    "invalid", "oshld", "oshst", "osh", "invalid", "nshld", "nshst", "nsh",
+    "invalid", "ishld", "ishst", "ish", "invalid", "ld",    "st",    "sy",
+};
+
+
 void Decoder::DecodeSpecialCondition(Instruction* instr) {
   switch (instr->SpecialValue()) {
     case 5:
@@ -1765,6 +1777,24 @@
           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "pld [r%d, #+%d]", Rn, offset);
         }
+      } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+        int option = instr->Bits(3, 0);
+        switch (instr->Bits(7, 4)) {
+          case 4:
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "dsb %s", barrier_option_names[option]);
+            break;
+          case 5:
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "dmb %s", barrier_option_names[option]);
+            break;
+          case 6:
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "isb %s", barrier_option_names[option]);
+            break;
+          default:
+            Unknown(instr);
+        }
       } else {
         Unknown(instr);
       }
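Bits 7:4 select the barrier kind and bits 3:0 index barrier_option_names, so an instruction with Bits(7, 4) == 5 and option 0xb disassembles as "dmb ish". The kind selection, extracted as a sketch:

  const char* BarrierMnemonic(int bits_7_4) {
    switch (bits_7_4) {
      case 4: return "dsb";
      case 5: return "dmb";
      case 6: return "isb";
      default: return nullptr;  // decoder falls through to Unknown(instr)
    }
  }
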
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index b7fad7b..1f55c0b 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -56,20 +56,6 @@
 const Register StringCompareDescriptor::RightRegister() { return r0; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return r1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return r1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return r2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return r3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return r4; }
-
-
 const Register ApiGetterDescriptor::function_address() { return r2; }
 
 
@@ -98,6 +84,32 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -115,6 +127,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return r0; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
 
 
@@ -167,13 +183,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3, r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r1};
@@ -432,6 +441,14 @@
                                    &default_descriptor);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -443,7 +460,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -455,7 +471,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 57fa3f5..80aef0c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -185,6 +185,9 @@
   }
 }
 
+void MacroAssembler::Drop(Register count, Condition cond) {
+  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
+}
 
 void MacroAssembler::Ret(int drop, Condition cond) {
   Drop(drop, cond);
@@ -449,9 +452,9 @@
                                 Condition cond,
                                 Label* branch) {
   DCHECK(cond == eq || cond == ne);
-  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
-  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
-  b(cond, branch);
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cond, branch);
 }
 
 
@@ -648,6 +651,69 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(r1));
+  DCHECK(code_entry.is(r4));
+  DCHECK(scratch.is(r5));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    add(scratch, js_function, Operand(offset - kHeapObjectTag));
+    ldr(ip, MemOperand(scratch));
+    cmp(ip, code_entry);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  const Register dst = scratch;
+  add(dst, js_function, Operand(offset - kHeapObjectTag));
+
+  push(code_entry);
+
+  // Save caller-saved registers, which include js_function.
+  DCHECK((kCallerSaved & js_function.bit()) != 0);
+  DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
+  stm(db_w, sp, (kCallerSaved | lr.bit()));
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count, code_entry);
+
+  mov(r0, js_function);
+  mov(r1, dst);
+  mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers (including js_function and code_entry).
+  ldm(ia_w, sp, (kCallerSaved | lr.bit()));
+
+  pop(code_entry);
+
+  bind(&done);
+}
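Call sites are pinned to concrete registers by the DCHECKs, so a use looks like the sketch below; the fast-path exits rely on the page flags checked above, and the value itself (a code entry in old space) never needs a remembered-set update:

  // Sketch; registers fixed by the DCHECKs: r1 = js_function,
  // r4 = code_entry, r5 = scratch.
  masm->RecordWriteCodeEntryField(r1, r4, r5);
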
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address,
@@ -1330,7 +1396,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2506,18 +2572,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, r1);
-  InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2613,9 +2667,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // will not return here
   if (is_const_pool_blocked()) {
@@ -2822,6 +2876,20 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    tst(object, Operand(kSmiTagMask));
+    Check(ne, kOperandIsASmiAndNotAReceiver);
+    push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+    pop(object);
+    Check(hs, kOperandIsNotAReceiver);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
   if (emit_debug_code()) {
@@ -3259,6 +3327,7 @@
     int mask,
     Condition cc,
     Label* condition_met) {
+  DCHECK(cc == eq || cc == ne);
   Bfc(scratch, object, 0, kPageSizeBits);
   ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
   tst(scratch, Operand(mask));
@@ -3396,7 +3465,8 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Register null_value = r5;
   Register  empty_fixed_array_value = r6;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Label next, start;
@@ -3410,6 +3480,7 @@
   cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
   b(eq, call_runtime);
 
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
   jmp(&start);
 
   bind(&next);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 26811b9..468f4b5 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -16,6 +16,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_r0};
 const Register kReturnRegister1 = {Register::kCode_r1};
+const Register kReturnRegister2 = {Register::kCode_r2};
 const Register kJSFunctionRegister = {Register::kCode_r1};
 const Register kContextRegister = {Register::kCode_r7};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
@@ -127,6 +128,7 @@
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
+  void Drop(Register count, Condition cond = al);
 
   void Ret(int drop, Condition cond = al);
 
@@ -218,7 +220,7 @@
   void JumpIfNotInNewSpace(Register object,
                            Register scratch,
                            Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
+    InNewSpace(object, scratch, eq, branch);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
@@ -226,7 +228,7 @@
   void JumpIfInNewSpace(Register object,
                         Register scratch,
                         Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
+    InNewSpace(object, scratch, ne, branch);
   }
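The eq/ne swap here follows from the new InNewSpace implementation: CheckPageFlag ends in a tst against the from-/to-space mask, so eq now means no new-space flag is set (not in new space) and ne means the opposite. As a plain predicate over the chunk's flag word:

  // Sketch; the real check reads the flags word of the object's page header.
  bool InNewSpace(uintptr_t chunk_flags) {
    const uintptr_t mask =
        (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
    return (chunk_flags & mask) != 0;
  }
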
 
   // Check if an object has a given incremental marking color.
@@ -288,6 +290,11 @@
                      pointers_to_here_check_for_value);
   }
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(
       Register object,
       Register map,
@@ -315,7 +322,6 @@
 
   // Push two registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
-    DCHECK(!src1.is(src2));
     if (src1.code() > src2.code()) {
       stm(db_w, sp, src1.bit() | src2.bit(), cond);
     } else {
@@ -326,7 +332,6 @@
 
   // Push three registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3, Condition cond = al) {
-    DCHECK(!AreAliased(src1, src2, src3));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -346,7 +351,6 @@
             Register src3,
             Register src4,
             Condition cond = al) {
-    DCHECK(!AreAliased(src1, src2, src3, src4));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         if (src3.code() > src4.code()) {
@@ -371,7 +375,6 @@
   // Push five registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3, Register src4,
             Register src5, Condition cond = al) {
-    DCHECK(!AreAliased(src1, src2, src3, src4, src5));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         if (src3.code() > src4.code()) {
@@ -1143,10 +1146,6 @@
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   Handle<Object> CodeObject() {
     DCHECK(!code_object_.is_null());
     return code_object_;
@@ -1298,6 +1297,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1407,7 +1409,7 @@
 
   // Expects object in r0 and returns map with validated enum cache
   // in r0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
+  void CheckEnumCache(Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
   // AllocationMemento object that can be checked for in order to pretransition
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6e19388..4630b94 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -14,6 +14,7 @@
 #include "src/base/bits.h"
 #include "src/codegen.h"
 #include "src/disasm.h"
+#include "src/runtime/runtime-utils.h"
 
 #if defined(USE_SIMULATOR)
 
@@ -391,7 +392,8 @@
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int value = *cur;
           Heap* current_heap = sim_->isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & 1) == 0) {
               PrintF("smi %d", value / 2);
@@ -1717,6 +1719,10 @@
                                         int32_t arg4,
                                         int32_t arg5);
 
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
+                                                   int32_t arg2, int32_t arg3,
+                                                   int32_t arg4);
+
 // These prototypes handle the four types of FP calls.
 typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
 typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
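ObjectTriple is too large to come back in registers, so under the ARM ABI it is returned through a hidden pointer passed as the first argument; the simulator code further down models exactly that, treating arg0 as the result slot and starting the real arguments at arg1:

  // Sketch of the effective shape after the ABI rewrite:
  //   ObjectTriple f(a1..a5)  ==>  void f(ObjectTriple* hidden_ret, a1..a5)
  ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
  *reinterpret_cast<ObjectTriple*>(arg0) = result;  // write the hidden slot
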
@@ -1900,9 +1906,36 @@
             reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
                 external);
         target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+      } else if (redirection->type() ==
+                 ExternalReference::BUILTIN_CALL_TRIPLE) {
+        // builtin call returning ObjectTriple.
+        SimulatorRuntimeTripleCall target =
+            reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF(
+              "Call to host triple returning runtime function %p "
+              "args %08x, %08x, %08x, %08x, %08x",
+              FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08x\n", get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        // arg0 is a hidden argument pointing to the return location, so don't
+        // pass it to the target function.
+        ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+        }
+        // The result is passed back via the address in the hidden first
+        // argument.
+        ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+        *sim_result = result;
+        set_register(r0, arg0);
       } else {
         // builtin call.
-        DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+        DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+               redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
         SimulatorRuntimeCall target =
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -2887,7 +2920,15 @@
                   }
                 }
               } else {
-                UNIMPLEMENTED();
+                // PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
+                if ((instr->Bits(20, 16) == 0x1f) &&
+                    (instr->Bits(11, 4) == 0xf3)) {
+                  // Rbit.
+                  uint32_t rm_val = get_register(instr->RmValue());
+                  set_register(rd, base::bits::ReverseBits(rm_val));
+                } else {
+                  UNIMPLEMENTED();
+                }
               }
               break;
           }
@@ -3871,6 +3912,9 @@
     case 0xB:
       if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
         // pld: ignore instruction.
+      } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+        // dsb, dmb, isb: ignore instruction for now.
+        // TODO(binji): implement
       } else {
         UNIMPLEMENTED();
       }
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index d776979..aeca563 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -731,8 +731,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -853,24 +853,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  // The sequence must be:
-  //   ldr ip0, [pc, #offset]
-  //   blr ip0
-  // See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot
-  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
-  Instruction* i2 = i1->following();
-  return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
-         i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
-  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 5854704..47786eb 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -369,6 +369,8 @@
 
 typedef FPRegister DoubleRegister;
 
+// TODO(arm64): Define SIMD registers.
+typedef FPRegister Simd128Register;
 
 // -----------------------------------------------------------------------------
 // Lists of registers.
@@ -925,7 +927,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   int buffer_space() const;
 
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index b6bae4a..11f66a4 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -138,6 +138,97 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- x0                 : number of arguments
+  //  -- lr                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  ASM_LOCATION("Builtins::Generate_MathMaxMin");
+
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in x1 and the double value in d1.
+  __ LoadRoot(x1, root_index);
+  __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+
+  // Remember how many slots to drop (including the receiver).
+  __ Add(x4, x0, 1);
+
+  Label done_loop, loop;
+  __ Bind(&loop);
+  {
+    // Check if all parameters have been processed.
+    __ Subs(x0, x0, 1);
+    __ B(lt, &done_loop);
+
+    // Load the next parameter tagged value into x2.
+    __ Peek(x2, Operand(x0, LSL, kPointerSizeLog2));
+
+    // Load the double value of the parameter into d2, first converting the
+    // parameter to a number with the ToNumberStub if necessary.
+    Label convert_smi, convert_number, done_convert;
+    __ JumpIfSmi(x2, &convert_smi);
+    __ JumpIfHeapNumber(x2, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(x0);
+      __ SmiTag(x4);
+      __ Push(x0, x1, x4);
+      __ Mov(x0, x2);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Mov(x2, x0);
+      __ Pop(x4, x1, x0);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiUntagToDouble(d1, x1, kSpeculativeUntag);
+        __ JumpIfSmi(x1, &done_restore);
+        __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+        __ Bind(&done_restore);
+      }
+      __ SmiUntag(x4);
+      __ SmiUntag(x0);
+    }
+    __ AssertNumber(x2);
+    __ JumpIfSmi(x2, &convert_smi);
+
+    __ Bind(&convert_number);
+    __ Ldr(d2, FieldMemOperand(x2, HeapNumber::kValueOffset));
+    __ B(&done_convert);
+
+    __ Bind(&convert_smi);
+    __ SmiUntagToDouble(d2, x2);
+    __ Bind(&done_convert);
+
+    // We can use a single fmin/fmax for the operation itself, but we then need
+    // to work out which HeapNumber (or smi) the result came from.
+    __ Fmov(x11, d1);
+    if (kind == MathMaxMinKind::kMin) {
+      __ Fmin(d1, d1, d2);
+    } else {
+      DCHECK(kind == MathMaxMinKind::kMax);
+      __ Fmax(d1, d1, d2);
+    }
+    __ Fmov(x10, d1);
+    __ Cmp(x10, x11);
+    __ Csel(x1, x1, x2, eq);
+    __ B(&loop);
+  }
+
+  __ Bind(&done_loop);
+  __ Mov(x0, x1);
+  __ Drop(x4);
+  __ Ret();
+}
+
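The Fmov/Cmp/Csel sequence above decides which tagged value produced the fmin/fmax result by comparing raw 64-bit bit patterns rather than doubles, so -0 vs +0 and NaN results still select the right HeapNumber or smi. A sketch using V8's bit_cast:

  uint64_t before = bit_cast<uint64_t>(acc);       // x11 = Fmov(d1) before
  acc = (kind == MathMaxMinKind::kMin) ? fmin(acc, param) : fmax(acc, param);
  bool keep_old = bit_cast<uint64_t>(acc) == before;  // x10 == x11
  tagged_acc = keep_old ? tagged_acc : tagged_param;  // Csel(x1, x1, x2, eq)
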
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- x0                     : number of arguments
@@ -229,8 +320,9 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(x2, x1, x3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(x2);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(x2);
   }
   __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
@@ -356,34 +448,15 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(x2, x1, x3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(x2);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(x2);
   }
   __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
   __ Ret();
 }
 
-
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- x1 : target function (preserved for callee)
-  //  -- x3 : new target (preserved for callee)
-  // -----------------------------------
-
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  // Push another copy as a parameter to the runtime call.
-  __ Push(x1, x3, x1);
-
-  __ CallRuntime(function_id, 1);
-
-  // Restore target function and new target.
-  __ Pop(x3, x1);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
@@ -391,10 +464,30 @@
   __ Br(x2);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- x0 : argument count (preserved for callee)
+  //  -- x1 : target function (preserved for callee)
+  //  -- x3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the target function and the new target.
+    // Push another copy as a parameter to the runtime call.
+    __ SmiTag(x0);
+    __ Push(x0, x1, x3, x1);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(x0);
+    __ CallRuntime(function_id, 1);
+    __ Move(x2, x0);
+
+    // Restore target function and new target.
+    __ Pop(x3, x1, x0);
+    __ SmiUntag(x0);
+  }
+
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
 }
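x0 holds a raw argument count here, which must not sit on the stack untagged across a call that can trigger GC; SmiTag'ing it first makes the stack slot scannable. The protect/call/restore shape, condensed:

  // As emitted above (sketch):
  //   SmiTag(x0); Push(x0, x1, x3, x1);
  //   CallRuntime(function_id, 1); Move(x2, x0);
  //   Pop(x3, x1, x0); SmiUntag(x0);
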
 
 
@@ -408,8 +501,7 @@
   __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
   __ B(hs, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ Bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -418,7 +510,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- x0     : number of arguments
   //  -- x1     : constructor function
@@ -448,148 +541,18 @@
     __ Push(allocation_site, argc);
 
     if (create_implicit_receiver) {
-      // sp[0]: new.target
-      // sp[1]: Constructor function.
-      // sp[2]: number of arguments (smi-tagged)
-      // sp[3]: allocation site
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ JumpIfNotObjectType(new_target, x10, x11, JS_FUNCTION_TYPE,
-                               &rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        Register init_map = x2;
-        __ Ldr(init_map,
-               FieldMemOperand(new_target,
-                               JSFunction::kPrototypeOrInitialMapOffset));
-        __ JumpIfSmi(init_map, &rt_call);
-        __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ Ldr(x10,
-               FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
-        __ Cmp(constructor, x10);
-        __ B(ne, &rt_call);
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial
-        // map's instance type would be JS_FUNCTION_TYPE.
-        __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
-        __ B(eq, &rt_call);
-
-        // Now allocate the JSObject on the heap.
-        Register obj_size = x10;
-        Register new_obj = x4;
-        Register next_obj = obj_size;  // May overlap.
-        __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
-        __ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS);
-
-        // Allocated the JSObject, now initialize the fields. Map is set to
-        // initial map and properties and elements are set to empty fixed array.
-        // NB. the object pointer is not tagged, so MemOperand is used.
-        Register write_address = x5;
-        Register empty = x7;
-        __ Mov(write_address, new_obj);
-        __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
-        STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
-        __ Str(init_map, MemOperand(write_address, kPointerSize, PostIndex));
-        STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
-        STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
-        __ Stp(empty, empty,
-               MemOperand(write_address, 2 * kPointerSize, PostIndex));
-        STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ Add(new_obj, new_obj, kHeapObjectTag);
-
-        // Fill all of the in-object properties with the appropriate filler.
-        Register filler = x7;
-        __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          Register constructon_count = x14;
-          MemOperand bit_field3 =
-              FieldMemOperand(init_map, Map::kBitField3Offset);
-          // Check if slack tracking is enabled.
-          __ Ldr(x11, bit_field3);
-          __ DecodeField<Map::ConstructionCounter>(constructon_count, x11);
-          __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
-          __ B(lt, &no_inobject_slack_tracking);
-          // Decrease generous allocation count.
-          __ Subs(x11, x11, Operand(1 << Map::ConstructionCounter::kShift));
-          __ Str(x11, bit_field3);
-
-          // Allocate object with a slack.
-          Register unused_props = x11;
-          __ Ldr(unused_props,
-                 FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
-          __ Ubfx(unused_props, unused_props,
-                  Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
-
-          Register end_of_pre_allocated = x11;
-          __ Sub(end_of_pre_allocated, next_obj,
-                 Operand(unused_props, LSL, kPointerSizeLog2));
-          unused_props = NoReg;
-
-          if (FLAG_debug_code) {
-            __ Cmp(write_address, end_of_pre_allocated);
-            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-
-          // Fill the pre-allocated fields with undef.
-          __ InitializeFieldsWithFiller(write_address, end_of_pre_allocated,
-                                        filler);
-
-          // Fill the remaining fields with one pointer filler map.
-          __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(write_address, next_obj, filler);
-
-          __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
-          __ B(ne, &allocated);
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(constructor, new_target, new_obj, init_map);
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ Pop(new_obj, new_target, constructor);
-
-          // Continue with JSObject being successfully allocated.
-          __ B(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(write_address, next_obj, filler);
-
-        // Continue with JSObject being successfully allocated.
-        __ B(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // x1: constructor function
-      // x3: new target
-      __ Bind(&rt_call);
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
-      __ Push(constructor, new_target, constructor, new_target);
-      __ CallRuntime(Runtime::kNewObject);
+      // Allocate the new receiver object.
+      __ Push(constructor, new_target);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
       __ Mov(x4, x0);
       __ Pop(new_target, constructor);
 
-      // Receiver for constructor call allocated.
-      // x1: constructor function
-      // x3: new target
-      // x4: JSObject
-      __ Bind(&allocated);
+      // ----------- S t a t e -------------
+      //  -- x1: constructor function
+      //  -- x3: new target
+      //  -- x4: newly allocated object
+      // -----------------------------------
 
       // Reload the number of arguments from the stack.
       // Set it up in x0 for the function call below.
@@ -697,6 +660,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi; a Smi would mean the constructor of
+  // a derived class returned neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(x0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ Bind(&dont_throw);
+  }
+
   __ DropBySMI(x1);
   __ Drop(1);
   if (create_implicit_receiver) {
@@ -707,17 +683,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -877,10 +859,8 @@
 //   - jssp: stack pointer.
 //   - lr: return address.
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-arm64.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -888,17 +868,19 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ Push(lr, fp, cp, x1);
   __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
-  __ Push(x3);
-
-  // Push zero for bytecode array offset.
-  __ Mov(x0, Operand(0));
-  __ Push(x0);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
   __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  DCHECK(!debug_info.is(x0));
+  __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
+  __ B(ne, &load_debug_bytecode_array);
   __ Ldr(kInterpreterBytecodeArrayRegister,
          FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+  __ Bind(&bytecode_array_loaded);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -909,6 +891,10 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ Mov(x0, Operand(0));
+  __ Push(x3, kInterpreterBytecodeArrayRegister, x0);
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -938,22 +924,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
-    __ B(hs, &ok);
-    __ Push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(kInterpreterBytecodeArrayRegister);
-    __ Bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -961,10 +934,9 @@
          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ Mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -975,6 +947,15 @@
   // and header removal.
   __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip0);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ Bind(&load_debug_bytecode_array);
+  __ Ldr(kInterpreterBytecodeArrayRegister,
+         FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ B(&bytecode_array_loaded);
 }
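
// Editorial aside -- a minimal, self-contained sketch (not V8 source) of the
// table-based dispatch that Generate_InterpreterEntryTrampoline emits above:
// load a byte of bytecode, index a table of handler addresses, and call
// indirectly. The change moves the table to a fixed external address, so no
// heap-root load is needed. Handler and the handler names are hypothetical.
#include <cstdint>
#include <cstdio>

using Handler = void (*)(const uint8_t* pc);

static void HandleLdaZero(const uint8_t* pc) { std::printf("LdaZero @%p\n", pc); }
static void HandleReturn(const uint8_t* pc) { std::printf("Return @%p\n", pc); }

// Fixed-address table, analogous to interpreter_dispatch_table_address().
static const Handler kDispatchTable[] = {HandleLdaZero, HandleReturn};

int main() {
  const uint8_t bytecode[] = {0, 1};
  for (const uint8_t* pc = bytecode; pc != bytecode + sizeof(bytecode); ++pc) {
    kDispatchTable[*pc](pc);  // the Ldrb + scaled load + indirect call above
  }
  return 0;
}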
 
 
@@ -998,47 +979,24 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ Push(x1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ Pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use this for interpreter deopts).
-  __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ Add(kInterpreterRegisterFileRegister, fp,
          Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ Ldr(kContextRegister,
          MemOperand(kInterpreterRegisterFileRegister,
                     InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ Ldr(x1,
-         MemOperand(kInterpreterRegisterFileRegister,
-                    InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldr(kInterpreterBytecodeArrayRegister,
-         FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+  __ Ldr(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1066,6 +1024,29 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ Push(x1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use this for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -1080,22 +1061,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in the interpreter
+  // entry trampoline. The return will never actually be taken, but our stack
+  // walker uses this address to determine whether a frame is interpreted.
+  __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1321,14 +1310,11 @@
 
   // Load the next prototype.
   __ Bind(&next_prototype);
-  __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ B(eq, receiver_check_failed);
-  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset));
-  __ Tst(x16, Operand(Map::IsHiddenPrototype::kMask));
+  __ Tst(x16, Operand(Map::HasHiddenPrototype::kMask));
   __ B(eq, receiver_check_failed);
+  __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ B(&prototype_loop_start);
 
@@ -1868,10 +1854,8 @@
 
     // Try to create the list from an arguments object.
     __ Bind(&create_arguments);
-    __ Ldrsw(len, UntagSmiFieldMemOperand(
-                      arguments_list,
-                      JSObject::kHeaderSize +
-                          Heap::kArgumentsLengthIndex * kPointerSize));
+    __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+                                          JSArgumentsObject::kLengthOffset));
     __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
     __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
     __ CompareAndBranch(len, x11, ne, &create_runtime);
@@ -1953,10 +1937,136 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ Mov(scratch1, Operand(debug_is_active));
+  __ Ldrb(scratch1, MemOperand(scratch1));
+  __ Cmp(scratch1, Operand(0));
+  __ B(ne, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ B(ne, &no_interpreter_frame);
+    __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(scratch3,
+         MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ B(ne, &no_arguments_adaptor);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(fp, scratch2);
+  __ Ldr(scratch1,
+         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ B(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldrsw(scratch1,
+           FieldMemOperand(scratch1,
+                           SharedFunctionInfo::kFormalParameterCountOffset));
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of the destination area where we will put the arguments
+  // after we drop the current frame. We add kPointerSize to count the receiver
+  // argument, which is not included in the formal parameter count.
+  Register dst_reg = scratch2;
+  __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
+  __ add(dst_reg, dst_reg,
+         Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = scratch1;
+  __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
+  // Count receiver argument as well (not included in args_reg).
+  __ add(src_reg, src_reg, Operand(kPointerSize));
+
+  if (FLAG_debug_code) {
+    __ Cmp(src_reg, dst_reg);
+    __ Check(lo, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy the callee arguments to the caller frame going backwards, to
+  // avoid corrupting them (the source and destination areas could overlap);
+  // see the overlap sketch after this function.
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch3;
+  Label loop, entry;
+  __ B(&entry);
+  __ bind(&loop);
+  __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+  __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+  __ bind(&entry);
+  __ Cmp(jssp, src_reg);
+  __ B(ne, &loop);
+
+  // Leave current frame.
+  __ Mov(jssp, dst_reg);
+  __ SetStackPointer(jssp);
+  __ AssertStackConsistency();
+
+  __ bind(&done);
+}
+}  // namespace
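
// Editorial sketch of the overlap hazard referenced above (not V8 source):
// when the destination slots sit at higher addresses than the source slots
// and the ranges overlap, a forward copy clobbers words before they are
// read; copying backwards (pre-decrementing, as the assembly loop does) is
// safe, mirroring memmove's overlap handling.
#include <cstdio>

static void CopyBackwards(long* dst_end, long* src_end, const long* src_begin) {
  // Both pointers point one past the last word to copy, matching the
  // MemOperand(..., -kPointerSize, PreIndex) addressing in the loop above.
  while (src_end != src_begin) *--dst_end = *--src_end;
}

int main() {
  long frame[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  // Move frame[0..2] into the overlapping destination slots frame[2..4].
  CopyBackwards(frame + 5, frame + 3, frame + 0);
  for (long v : frame) std::printf("%ld ", v);  // prints: 0 1 0 1 2 5 6 7
  std::printf("\n");
  return 0;
}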
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   ASM_LOCATION("Builtins::Generate_CallFunction");
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments (not including the receiver)
@@ -2044,6 +2154,10 @@
   //  -- cp : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
   __ Ldrsw(
       x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
   ParameterCount actual(x0);
@@ -2140,13 +2254,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments (not including the receiver)
   //  -- x1 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(x1);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
   // Patch the receiver to [[BoundThis]].
   __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
   __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
@@ -2165,7 +2284,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments (not including the receiver)
   //  -- x1 : the target to call (can be any Object).
@@ -2175,14 +2295,24 @@
   __ JumpIfSmi(x1, &non_callable);
   __ Bind(&non_smi);
   __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
-  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
   __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
-  __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
+  __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+
   __ Cmp(x5, JS_PROXY_TYPE);
   __ B(ne, &non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, x0, x3, x4, x5);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ Push(x1);
   // Increase the arguments size to include the pushed function and the
@@ -2195,15 +2325,12 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ Bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
-  __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
   // Overwrite the original receiver with the (original) target.
   __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2341,7 +2468,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments (not including the receiver)
   //  -- x2 : the address of the first argument to be pushed. Subsequent
@@ -2369,7 +2497,9 @@
   __ B(gt, &loop_header);
 
   // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index a1e9207..ad566e6 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -207,8 +207,7 @@
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
                                           Register right, Register scratch,
                                           FPRegister double_scratch,
-                                          Label* slow, Condition cond,
-                                          Strength strength) {
+                                          Label* slow, Condition cond) {
   DCHECK(!AreAliased(left, right, scratch));
   Label not_identical, return_equal, heap_number;
   Register result = x0;
@@ -231,14 +230,6 @@
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ Cmp(right_type, SIMD128_VALUE_TYPE);
     __ B(eq, slow);
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics, since
-      // we need to throw a TypeError. Smis have already been ruled out.
-      __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
-      __ B(eq, &return_equal);
-      __ Tst(right_type, Operand(kIsNotStringMask));
-      __ B(ne, slow);
-    }
   } else if (cond == eq) {
     __ JumpIfHeapNumber(right, &heap_number);
   } else {
@@ -253,13 +244,6 @@
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ Cmp(right_type, SIMD128_VALUE_TYPE);
     __ B(eq, slow);
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics,
-      // since we need to throw a TypeError. Smis and heap numbers have
-      // already been ruled out.
-      __ Tst(right_type, Operand(kIsNotStringMask));
-      __ B(ne, slow);
-    }
     // Normally here we fall through to return_equal, but undefined is
     // special: (undefined == undefined) == true, but
     // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -443,54 +427,49 @@
 
 // Fast negative check for internalized-to-internalized equality.
 // See call site for description.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
-                                                     Register left,
-                                                     Register right,
-                                                     Register left_map,
-                                                     Register right_map,
-                                                     Register left_type,
-                                                     Register right_type,
-                                                     Label* possible_strings,
-                                                     Label* not_both_strings) {
+static void EmitCheckForInternalizedStringsOrObjects(
+    MacroAssembler* masm, Register left, Register right, Register left_map,
+    Register right_map, Register left_type, Register right_type,
+    Label* possible_strings, Label* runtime_call) {
   DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
   Register result = x0;
+  DCHECK(left.is(x0) || right.is(x0));
 
-  Label object_test;
+  Label object_test, return_unequal, undetectable;
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   // TODO(all): reexamine this branch sequence for optimisation wrt branch
   // prediction.
   __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
   __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
-  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
   __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
 
-  // Both are internalized. We already checked that they weren't the same
-  // pointer, so they are not equal.
-  __ Mov(result, NOT_EQUAL);
+  // Both are internalized. We already checked that they weren't the same
+  // pointer, so they are not equal. Return non-equal by returning the
+  // non-zero object pointer in x0.
   __ Ret();
 
   __ Bind(&object_test);
 
-  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
-
-  // If right >= FIRST_JS_RECEIVER_TYPE, test left.
-  // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
-  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
-
-  __ B(lt, not_both_strings);
-
-  // If both objects are undetectable, they are equal. Otherwise, they are not
-  // equal, since they are different objects and an object is not equal to
-  // undefined.
-
-  // Returning here, so we can corrupt right_type and left_type.
-  Register right_bitfield = right_type;
   Register left_bitfield = left_type;
+  Register right_bitfield = right_type;
   __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
   __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
-  __ And(result, right_bitfield, left_bitfield);
-  __ And(result, result, 1 << Map::kIsUndetectable);
-  __ Eor(result, result, 1 << Map::kIsUndetectable);
+  __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
+  __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+
+  __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
+  __ B(lt, runtime_call);
+  __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
+  __ B(lt, runtime_call);
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in x0.
+  __ Ret();
+
+  __ bind(&undetectable);
+  __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+  __ Mov(result, EQUAL);
   __ Ret();
 }
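
// Editorial note: the rewritten fast path above encodes the ES semantics
// that two distinct undetectable objects (e.g. document.all) compare equal
// under ==, while any other pair of distinct receivers compares unequal.
// A hedged sketch of the decision, with booleans standing in for the
// Map::kIsUndetectable bit tests (hypothetical names, not V8's API):
#include <cstdio>

static bool DistinctObjectsLooselyEqual(bool left_undetectable,
                                        bool right_undetectable) {
  // The &undetectable / &return_unequal labels above implement this predicate.
  return left_undetectable && right_undetectable;
}

int main() {
  std::printf("%d\n", DistinctObjectsLooselyEqual(true, true));   // 1: equal
  std::printf("%d\n", DistinctObjectsLooselyEqual(true, false));  // 0: unequal
  return 0;
}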
 
@@ -536,8 +515,7 @@
 
   // Handle the case where the objects are identical. Either returns the answer
   // or goes to slow. Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
-                                strength());
+  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
 
   // If either is a smi (we know that at least one is not a smi), then they can
   // only be strictly equal if the other is a HeapNumber.
@@ -650,11 +628,19 @@
 
   __ Bind(&slow);
 
-  __ Push(lhs, rhs);
-  // Figure out which native to call and setup the arguments.
   if (cond == eq) {
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
+    __ Sub(x0, x0, x1);
+    __ Ret();
   } else {
+    __ Push(lhs, rhs);
     int ncr;  // NaN compare result
     if ((cond == lt) || (cond == le)) {
       ncr = GREATER;
@@ -667,8 +653,7 @@
 
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ Bind(&miss);
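
// Editorial sketch (not V8 source) of the "turn true into 0" trick above:
// Runtime::kEqual returns the canonical true/false heap objects, and callers
// test the stub's result against zero. Subtracting the 'true' pointer maps
// true -> 0 and false -> some non-zero delta, as the Sub(x0, x0, x1) does.
#include <cstdint>
#include <cstdio>

static intptr_t BooleanToCompareResult(const void* result,
                                       const void* true_value) {
  return reinterpret_cast<intptr_t>(result) -
         reinterpret_cast<intptr_t>(true_value);  // 0 iff result is 'true'
}

int main() {
  static const int kTrue = 1, kFalse = 0;  // stand-ins for the heap roots
  std::printf("%ld\n", (long)BooleanToCompareResult(&kTrue, &kTrue));   // 0
  std::printf("%ld\n", (long)BooleanToCompareResult(&kFalse, &kTrue));  // != 0
  return 0;
}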
@@ -971,8 +956,6 @@
     __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
                           result_double);
     DCHECK(result_tagged.is(x0));
-    __ IncrementCounter(
-        isolate()->counters()->math_pow(), 1, scratch0, scratch1);
     __ Ret();
   } else {
     AllowExternalCallThatCantCauseGC scope(masm);
@@ -984,8 +967,6 @@
         0, 2);
     __ Mov(lr, saved_lr);
     __ Bind(&done);
-    __ IncrementCounter(
-        isolate()->counters()->math_pow(), 1, scratch0, scratch1);
     __ Ret();
   }
 }
@@ -1104,10 +1085,13 @@
     __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
   }
 
-  // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
-  // registers.
+  // Reserve three slots to preserve x21-x23 callee-saved registers. If the
+  // result size is too large to be returned in registers, also reserve
+  // space for the return value.
+  int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
+  // Enter the exit frame.
   FrameScope scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(save_doubles(), x10, 3);
+  __ EnterExitFrame(save_doubles(), x10, extra_stack_space);
   DCHECK(csp.Is(__ StackPointer()));
 
   // Poke callee-saved registers into reserved space.
@@ -1115,6 +1099,11 @@
   __ Poke(argc, 2 * kPointerSize);
   __ Poke(target, 3 * kPointerSize);
 
+  if (result_size() > 2) {
+    // Save the location of the return value into x8 for the call.
+    __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
+  }
+
   // We normally only keep tagged values in callee-saved registers, as they
   // could be pushed onto the stack by called stubs and functions, and on the
   // stack they can confuse the GC. However, we're only calling C functions
@@ -1184,7 +1173,18 @@
   __ Blr(target);
   __ Bind(&return_location);
 
-  //  x0    result      The return code from the call.
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+    // Read result values stored on stack.
+    __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
+    __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
+    __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
+  }
+  // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
+
+  //  x0    result0      The return code from the call.
+  //  x1    result1      For calls which return ObjectPair or ObjectTriple.
+  //  x2    result2      For calls which return ObjectTriple.
   //  x21   argv
   //  x22   argc
   //  x23   target
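
// Editorial aside: under the AArch64 AAPCS, an aggregate result larger than
// two registers is returned through memory whose address the caller passes
// in x8 -- which is what the reserved stack slots and the "__ Add(x8, ...)"
// above arrange for. A hedged C++ analogue (the struct name is hypothetical,
// standing in for V8's ObjectTriple):
struct ObjectTripleLike {
  void* x;
  void* y;
  void* z;  // 24 bytes: too large for the x0/x1 register pair
};

// Compiled for AArch64, the caller allocates space for the result and
// passes its address in x8; the callee stores the fields through it.
ObjectTripleLike MakeTriple(void* a, void* b, void* c) { return {a, b, c}; }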
@@ -1616,363 +1616,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
-  Register key = ArgumentsAccessReadDescriptor::index();
-  DCHECK(arg_count.is(x0));
-  DCHECK(key.is(x1));
-
-  // The displacement is the offset of the last parameter (if any) relative
-  // to the frame pointer.
-  static const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(key, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Register local_fp = x11;
-  Register caller_fp = x11;
-  Register caller_ctx = x12;
-  Label skip_adaptor;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(caller_ctx, MemOperand(caller_fp,
-                                StandardFrameConstants::kContextOffset));
-  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ Csel(local_fp, fp, caller_fp, ne);
-  __ B(ne, &skip_adaptor);
-
-  // Load the actual arguments limit found in the arguments adaptor frame.
-  __ Ldr(arg_count, MemOperand(caller_fp,
-                               ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Bind(&skip_adaptor);
-
-  // Check index against formal parameters count limit. Use unsigned comparison
-  // to get negative check for free: branch if key < 0 or key >= arg_count.
-  __ Cmp(key, arg_count);
-  __ B(hs, &slow);
-
-  // Read the argument from the stack and return it.
-  __ Sub(x10, arg_count, key);
-  __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
-  __ Ldr(x0, MemOperand(x10, kDisplacement));
-  __ Ret();
-
-  // Slow case: handle non-smi or out-of-bounds access to arguments by calling
-  // the runtime system.
-  __ Bind(&slow);
-  __ Push(key);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // x1 : function
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-
-  DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  Register caller_fp = x10;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  // Load and untag the context.
-  __ Ldr(w11, UntagSmiMemOperand(caller_fp,
-                                 StandardFrameConstants::kContextOffset));
-  __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
-  __ B(ne, &runtime);
-
-  // Patch the arguments.length and parameters pointer in the current frame.
-  __ Ldr(x2,
-         MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2));
-  __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset);
-
-  __ Bind(&runtime);
-  __ Push(x1, x3, x2);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // x1 : function
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-  //
-  // Returns pointer to result object in x0.
-
-  DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Make an untagged copy of the parameter count.
-  // Note: arg_count_smi is an alias of param_count_smi.
-  Register function = x1;
-  Register arg_count_smi = x2;
-  Register param_count_smi = x2;
-  Register recv_arg = x3;
-  Register param_count = x7;
-  __ SmiUntag(param_count, param_count_smi);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Register caller_fp = x11;
-  Register caller_ctx = x12;
-  Label runtime;
-  Label adaptor_frame, try_allocate;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(caller_ctx, MemOperand(caller_fp,
-                                StandardFrameConstants::kContextOffset));
-  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(eq, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped params, min(params, args) (uninit)
-  //   x7   param_count   number of function parameters
-  //   x11  caller_fp     caller's frame pointer
-  //   x14  arg_count     number of function arguments (uninit)
-
-  Register arg_count = x14;
-  Register mapped_params = x4;
-  __ Mov(arg_count, param_count);
-  __ Mov(mapped_params, param_count);
-  __ B(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ Bind(&adaptor_frame);
-  __ Ldr(arg_count_smi,
-         MemOperand(caller_fp,
-                    ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(arg_count, arg_count_smi);
-  __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
-  __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
-  // Compute the mapped parameter count = min(param_count, arg_count)
-  __ Cmp(param_count, arg_count);
-  __ Csel(mapped_params, param_count, arg_count, lt);
-
-  __ Bind(&try_allocate);
-
-  //   x0   alloc_obj     pointer to allocated objects: param map, backing
-  //                      store, arguments (uninit)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x7   param_count   number of function parameters
-  //   x10  size          size of objects to allocate (uninit)
-  //   x14  arg_count     number of function arguments
-
-  // Compute the size of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has two extra words containing context and backing
-  // store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-
-  // Calculate the parameter map size, assuming it exists.
-  Register size = x10;
-  __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
-  __ Add(size, size, kParameterMapHeaderSize);
-
-  // If there are no mapped parameters, set the running size total to zero.
-  // Otherwise, use the parameter map size calculated earlier.
-  __ Cmp(mapped_params, 0);
-  __ CzeroX(size, eq);
-
-  // 2. Add the size of the backing store and arguments object.
-  __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
-  __ Add(size, size,
-         FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
-
-  // Do the allocation of all three objects in one go. Assign this to x0, as it
-  // will be returned to the caller.
-  Register alloc_obj = x0;
-  __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
-
-  // Get the arguments boilerplate from the current (global) context.
-
-  //   x0   alloc_obj       pointer to allocated objects (param map, backing
-  //                        store, arguments)
-  //   x1   function        function pointer
-  //   x2   arg_count_smi   number of function arguments (smi)
-  //   x3   recv_arg        pointer to receiver arguments
-  //   x4   mapped_params   number of mapped parameters, min(params, args)
-  //   x7   param_count     number of function parameters
-  //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
-  //   x14  arg_count       number of function arguments
-
-  Register global_ctx = x10;
-  Register sloppy_args_map = x11;
-  Register aliased_args_map = x10;
-  __ Ldr(global_ctx, NativeContextMemOperand());
-
-  __ Ldr(sloppy_args_map,
-         ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
-  __ Ldr(
-      aliased_args_map,
-      ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
-  __ Cmp(mapped_params, 0);
-  __ CmovX(sloppy_args_map, aliased_args_map, ne);
-
-  // Copy the JS object part.
-  __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
-  __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
-  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
-  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  const int kCalleeOffset = JSObject::kHeaderSize +
-                            Heap::kArgumentsCalleeIndex * kPointerSize;
-  __ AssertNotSmi(function);
-  __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
-
-  // Use the length and set that as an in-object property.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-                            Heap::kArgumentsLengthIndex * kPointerSize;
-  __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, "elements" will point there, otherwise
-  // it will point to the backing store.
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x5   elements      pointer to parameter map or backing store (uninit)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x7   param_count   number of function parameters
-  //   x14  arg_count     number of function arguments
-
-  Register elements = x5;
-  __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
-  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ Cmp(mapped_params, 0);
-  // Set up backing store address, because it is needed later for filling in
-  // the unmapped arguments.
-  Register backing_store = x6;
-  __ CmovX(backing_store, elements, eq);
-  __ B(eq, &skip_parameter_map);
-
-  __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
-  __ Add(x10, mapped_params, 2);
-  __ SmiTag(x10);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Str(cp, FieldMemOperand(elements,
-                             FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
-  __ Add(x10, x10, kParameterMapHeaderSize);
-  __ Str(x10, FieldMemOperand(elements,
-                              FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. Then index the context,
-  // where parameters are stored in reverse order, at:
-  //
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
-  //
-  // The mapped parameter thus needs to get indices:
-  //
-  //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
-  //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
-  //
-  // We loop from right to left.
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x5   elements      pointer to parameter map or backing store (uninit)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x7   param_count   number of function parameters
-  //   x11  loop_count    parameter loop counter (uninit)
-  //   x12  index         parameter index (smi, uninit)
-  //   x13  the_hole      hole value (uninit)
-  //   x14  arg_count     number of function arguments
-
-  Register loop_count = x11;
-  Register index = x12;
-  Register the_hole = x13;
-  Label parameters_loop, parameters_test;
-  __ Mov(loop_count, mapped_params);
-  __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
-  __ Sub(index, index, mapped_params);
-  __ SmiTag(index);
-  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
-  __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
-  __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
-  __ B(&parameters_test);
-
-  __ Bind(&parameters_loop);
-  __ Sub(loop_count, loop_count, 1);
-  __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
-  __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
-  __ Str(index, MemOperand(elements, x10));
-  __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
-  __ Str(the_hole, MemOperand(backing_store, x10));
-  __ Add(index, index, Smi::FromInt(1));
-  __ Bind(&parameters_test);
-  __ Cbnz(loop_count, &parameters_loop);
-
-  __ Bind(&skip_parameter_map);
-  // Copy arguments header and remaining slots (if there are any.)
-  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
-  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
-  __ Str(arg_count_smi, FieldMemOperand(backing_store,
-                                        FixedArray::kLengthOffset));
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x14  arg_count     number of function arguments
-
-  Label arguments_loop, arguments_test;
-  __ Mov(x10, mapped_params);
-  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
-  __ B(&arguments_test);
-
-  __ Bind(&arguments_loop);
-  __ Sub(recv_arg, recv_arg, kPointerSize);
-  __ Ldr(x11, MemOperand(recv_arg));
-  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
-  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
-  __ Add(x10, x10, 1);
-
-  __ Bind(&arguments_test);
-  __ Cmp(x10, arg_count);
-  __ B(lt, &arguments_loop);
-
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ Bind(&runtime);
-  __ Push(function, recv_arg, arg_count_smi);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is in lr.
   Label slow;
@@ -1993,182 +1636,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // x1 : function
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-  //
-  // Returns pointer to result object in x0.
-
-  DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Make an untagged copy of the parameter count.
-  Register function = x1;
-  Register param_count_smi = x2;
-  Register params = x3;
-  Register param_count = x13;
-  __ SmiUntag(param_count, param_count_smi);
-
-  // Test if arguments adaptor needed.
-  Register caller_fp = x11;
-  Register caller_ctx = x12;
-  Label try_allocate, runtime;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(caller_ctx, MemOperand(caller_fp,
-                                StandardFrameConstants::kContextOffset));
-  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(ne, &try_allocate);
-
-  //   x1   function          function pointer
-  //   x2   param_count_smi   number of parameters passed to function (smi)
-  //   x3   params            pointer to parameters
-  //   x11  caller_fp         caller's frame pointer
-  //   x13  param_count       number of parameters passed to function
-
-  // Patch the argument length and parameters pointer.
-  __ Ldr(param_count_smi,
-         MemOperand(caller_fp,
-                    ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(param_count, param_count_smi);
-  __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
-  __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
-  // Try the new space allocation. Start out with computing the size of the
-  // arguments object and the elements array in words.
-  Register size = x10;
-  __ Bind(&try_allocate);
-  __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
-  __ Cmp(param_count, 0);
-  __ CzeroX(size, eq);
-  __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
-
-  // Do the allocation of both objects in one go. Assign this to x0, as it will
-  // be returned to the caller.
-  Register alloc_obj = x0;
-  __ Allocate(size, alloc_obj, x11, x12, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current (native) context.
-  Register strict_args_map = x4;
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
-                           strict_args_map);
-
-  //   x0   alloc_obj         pointer to allocated objects: parameter array and
-  //                          arguments object
-  //   x1   function          function pointer
-  //   x2   param_count_smi   number of parameters passed to function (smi)
-  //   x3   params            pointer to parameters
-  //   x4   strict_args_map   offset to arguments map
-  //   x13  param_count       number of parameters passed to function
-  __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
-  __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
-  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
-  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Set the smi-tagged length as an in-object property.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-                            Heap::kArgumentsLengthIndex * kPointerSize;
-  __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ Cbz(param_count, &done);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  Register elements = x5;
-  __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
-  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
-  __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
-  //   x0   alloc_obj         pointer to allocated objects: parameter array and
-  //                          arguments object
-  //   x1   function          function pointer
-  //   x2   param_count_smi   number of parameters passed to function (smi)
-  //   x3   params            pointer to parameters
-  //   x4   array             pointer to array slot (uninit)
-  //   x5   elements          pointer to elements array of alloc_obj
-  //   x13  param_count       number of parameters passed to function
-
-  // Copy the fixed array slots.
-  Label loop;
-  Register array = x4;
-  // Set up pointer to first array slot.
-  __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-
-  __ Bind(&loop);
-  // Pre-decrement the parameters pointer by kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
-  // Post-increment elements by kPointerSize on each iteration.
-  __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
-  __ Sub(param_count, param_count, 1);
-  __ Cbnz(param_count, &loop);
-
-  // Return from stub.
-  __ Bind(&done);
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ Bind(&runtime);
-  __ Push(function, params, param_count_smi);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-  // x4 : rest parameter index (tagged)
-  //
-  // Returns pointer to result object in x0.
-
-  DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
-  DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
-
-  // Get the stub arguments from the frame, and make an untagged copy of the
-  // parameter count.
-  Register rest_index_smi = x4;
-  Register param_count_smi = x2;
-  Register params = x3;
-  Register param_count = x13;
-  __ SmiUntag(param_count, param_count_smi);
-
-  // Test if arguments adaptor needed.
-  Register caller_fp = x11;
-  Register caller_ctx = x12;
-  Label runtime;
-  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(caller_ctx,
-         MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
-  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(ne, &runtime);
-
-  //   x4   rest_index_smi     index of rest parameter
-  //   x2   param_count_smi    number of parameters passed to function (smi)
-  //   x3   params             pointer to parameters
-  //   x11  caller_fp          caller's frame pointer
-  //   x13  param_count        number of parameters passed to function
-
-  // Patch the argument length and parameters pointer.
-  __ Ldr(param_count_smi,
-         MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(param_count, param_count_smi);
-  __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
-  __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
-  __ Bind(&runtime);
-  __ Push(param_count_smi, params, rest_index_smi);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec);
@@ -2280,35 +1747,35 @@
   __ Peek(subject, kSubjectOffset);
   __ JumpIfSmi(subject, &runtime);
 
-  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-
   __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
 
   // Handle subject string according to its encoding and representation:
-  // (1) Sequential string?  If yes, go to (5).
-  // (2) Anything but sequential or cons?  If yes, go to (6).
-  // (3) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (4) Is subject external?  If yes, go to (7).
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (6) Not a long external string?  If yes, go to (8).
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
-  //     Go to (5).
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
 
-  Label check_underlying;   // (4)
-  Label seq_string;         // (5)
-  Label not_seq_nor_cons;   // (6)
-  Label external_string;    // (7)
-  Label not_long_external;  // (8)
+  Label check_underlying;   // (1)
+  Label seq_string;         // (4)
+  Label not_seq_nor_cons;   // (5)
+  Label external_string;    // (6)
+  Label not_long_external;  // (7)
 
-  // (1) Sequential string?  If yes, go to (5).
+  __ Bind(&check_underlying);
+  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
   __ And(string_representation,
          string_type,
          kIsNotStringMask |
@@ -2325,36 +1792,24 @@
   //                is a String
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ Cbz(string_representation, &seq_string);  // Go to (5).
+  __ Cbz(string_representation, &seq_string);  // Go to (4).
 
-  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Cmp(string_representation, kExternalStringTag);
-  __ B(ge, &not_seq_nor_cons);  // Go to (6).
+  __ B(ge, &not_seq_nor_cons);  // Go to (5).
 
   // (3) Cons string.  Check that it's flat.
   __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
   __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
   // Replace subject with first string.
   __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ B(&check_underlying);
 
-  // (4) Is subject external?  If yes, go to (7).
-  __ Bind(&check_underlying);
-  // Reload the string type.
-  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ TestAndBranchIfAnySet(string_type.X(),
-                           kStringRepresentationMask,
-                           &external_string);  // Go to (7).
-
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (4) Sequential string.  Load regexp code according to encoding.
   __ Bind(&seq_string);
 
   // Check that the third argument is a positive smi less than the subject
@@ -2624,12 +2079,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (6) Not a long external string?  If yes, go to (8).
+  // (5) Long external string?  If not, go to (7).
   __ Bind(&not_seq_nor_cons);
   // Compare flags are still set.
-  __ B(ne, &not_long_external);  // Go to (8).
+  __ B(ne, &not_long_external);  // Go to (7).
 
-  // (7) External string. Make it, offset-wise, look like a sequential string.
+  // (6) External string. Make it, offset-wise, look like a sequential string.
   __ Bind(&external_string);
   if (masm->emit_debug_code()) {
     // Assert that we do not have a cons or slice (indirect strings) here.
@@ -2647,9 +2102,9 @@
   // Move the pointer so that offset-wise, it looks like a sequential string.
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-  __ B(&seq_string);    // Go to (5).
+  __ B(&seq_string);  // Go to (4).
 
-  // (8) If this is a short external string or not a string, bail out to
+  // (7) If this is a short external string or not a string, bail out to
   // runtime.
   __ Bind(&not_long_external);
   STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -2657,11 +2112,11 @@
                            kShortExternalStringMask | kIsNotStringMask,
                            &runtime);
 
-  // (9) Sliced string. Replace subject with parent.
+  // (8) Sliced string. Replace subject with parent.
   __ Ldr(sliced_string_offset,
          UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
-  __ B(&check_underlying);    // Go to (4).
+  __ B(&check_underlying);  // Go to (1).
 #endif
 }
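
// Editorial sketch of the unwrapping loop behind the renumbered steps above
// (illustrative only; Str is a hypothetical stand-in for V8's string
// hierarchy): cons strings are replaced by their flat first component and
// sliced strings by their parent, looping back to step (1) until the
// subject is sequential or external.
struct Str {
  enum Kind { kSeq, kCons, kSliced, kExternal } kind;
  Str* first;   // cons: first (flat) component
  Str* parent;  // sliced: underlying string
};

static Str* UnwrapForRegExp(Str* subject) {
  for (;;) {  // the "Go to (1)" back-edges in the comments above
    switch (subject->kind) {
      case Str::kCons:   subject = subject->first;  break;  // step (3)
      case Str::kSliced: subject = subject->parent; break;  // step (8)
      default:           return subject;  // steps (4)/(6): load regexp code
    }
  }
}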
 
@@ -2917,7 +2372,8 @@
 
   __ Bind(&call_function);
   __ Mov(x0, argc);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -2951,7 +2407,7 @@
 
   __ Bind(&call);
   __ Mov(x0, argc);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -3151,18 +2607,14 @@
 
   __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
-      __ AssertSmi(x1);
-      __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
-      __ AssertSmi(x0);
-    }
-    __ Sub(x0, x1, x0);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
+    __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
+    __ AssertSmi(x1);
+    __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+    __ AssertSmi(x0);
   }
+  __ Sub(x0, x1, x0);
+  __ Ret();
 
   __ Bind(&miss);
   GenerateMiss(masm);
@@ -3236,7 +2688,7 @@
   __ Ret();
 
   __ Bind(&unordered);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3467,8 +2919,6 @@
   if (Token::IsEqualityOp(op())) {
     __ Sub(result, rhs, lhs);
     __ Ret();
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     Register ncr = x2;
     if (op() == Token::LT || op() == Token::LTE) {
@@ -3859,6 +3309,39 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in x0.
+  Label is_number;
+  __ JumpIfSmi(x0, &is_number);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
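+  // Since FIRST_NAME_TYPE == FIRST_TYPE, a single unsigned comparison of the
+  // instance type against LAST_NAME_TYPE classifies names: JumpIfObjectType
+  // branches to not_name when the type is above that bound (condition hi).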
+  __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
+  // x0: receiver
+  // x1: receiver instance type
+  __ Ret();
+  __ Bind(&not_name);
+
+  Label not_heap_number;
+  __ Cmp(x1, HEAP_NUMBER_TYPE);
+  __ B(ne, &not_heap_number);
+  __ Bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ Bind(&not_heap_number);
+
+  Label not_oddball;
+  __ Cmp(x1, ODDBALL_TYPE);
+  __ B(ne, &not_oddball);
+  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
+  __ Ret();
+  __ Bind(&not_oddball);
+
+  __ Push(x0);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -4042,8 +3525,7 @@
     __ Ldr(val, MemOperand(regs_.address()));
     __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
 
-    __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                        &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -5343,6 +4825,672 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : target
+  //  -- x3 : new target
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+  __ AssertReceiver(x3);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(x2, &new_object);
+  __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
+  __ CompareAndBranch(x0, x1, ne, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
+  __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
+  __ Bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ Mov(x1, x0);
+  STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
+  __ Str(x2, MemOperand(x1, kPointerSize, PostIndex));
+  __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
+  STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
+  STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
+  __ Stp(x3, x3, MemOperand(x1, 2 * kPointerSize, PostIndex));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+
+  // ----------- S t a t e -------------
+  //  -- x0 : result (untagged)
+  //  -- x1 : result fields (untagged)
+  //  -- x5 : result end (untagged)
+  //  -- x2 : initial map
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
+  __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
+  __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
+                           &slack_tracking);
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(x1, x5, x6);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Add(x0, x0, kHeapObjectTag);
+    __ Ret();
+  }
+  __ Bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
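+    // The construction counter occupies the topmost bits of bit field 3
+    // (kNext == 32), so subtracting 1 << kShift decrements just that field;
+    // the bits below kShift are unaffected.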
+    __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
+    __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
+    __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
+    __ InitializeFieldsWithFiller(x1, x4, x6);
+
+    // Initialize the remaining (reserved) fields with the one-pointer
+    // filler map.
+    __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(x1, x5, x6);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Add(x0, x0, kHeapObjectTag);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
+    __ Ret();
+
+    // Finalize the instance size.
+    __ Bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(x0, x2);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(x0);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ Bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
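+    // x4 holds the instance size in words; a single shift both converts it
+    // to a byte count (kPointerSizeLog2) and Smi-tags the result
+    // (kSmiTagSize + kSmiShiftSize), as %AllocateInNewSpace expects a Smi.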
+    __ Mov(x4,
+           Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+    __ Push(x2, x4);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(x2);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Sub(x0, x0, kHeapObjectTag);
+  __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
+  __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
+  __ B(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ Bind(&new_object);
+  __ Push(x1, x3);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make x2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Mov(x2, fp);
+    __ B(&loop_entry);
+    __ Bind(&loop);
+    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+    __ Bind(&loop_entry);
+    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+    __ Cmp(x3, x1);
+    __ B(ne, &loop);
+  }
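+  // A JavaScript frame stores the function itself in the standard marker
+  // slot, while handler/stub frames store a Smi frame type there, so
+  // comparing the slot against x1 stops the walk at the function's frame.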
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+  __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ B(ne, &no_rest_parameters);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ Ldrsw(x0, UntagSmiMemOperand(
+                   x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldrsw(
+      x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Subs(x0, x0, x1);
+  __ B(gt, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ Bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, TAG_OBJECT);
+    __ Bind(&done_allocate);
+
+    // Set up the rest parameter array in x0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
+    __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
+    __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
+    __ Mov(x1, Smi::FromInt(0));
+    __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ Bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ B(&done_allocate);
+  }
+
+  __ Bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
+    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- x0 : number of rest parameters
+    //  -- x2 : pointer to the first rest parameter
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
+    __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
+    __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+    __ Bind(&done_allocate);
+
+    // Compute arguments.length in x6.
+    __ SmiTag(x6, x0);
+
+    // Set up the elements array in x3.
+    __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
+    __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
+    __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
+    __ Add(x4, x3, FixedArray::kHeaderSize);
+    {
+      Label loop, done_loop;
+      __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
+      __ Bind(&loop);
+      __ Cmp(x4, x0);
+      __ B(eq, &done_loop);
+      __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
+      __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
+      __ Sub(x2, x2, Operand(1 * kPointerSize));
+      __ Add(x4, x4, Operand(1 * kPointerSize));
+      __ B(&loop);
+      __ Bind(&done_loop);
+    }
+
+    // Set up the rest parameter array in x0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
+    __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+    __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
+    __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
+    __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ Bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(x0);
+      __ SmiTag(x1);
+      __ Push(x0, x2, x1);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ Mov(x3, x0);
+      __ Pop(x2, x0);
+      __ SmiUntag(x0);
+    }
+    __ B(&done_allocate);
+  }
+}
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldrsw(
+      x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Add(x3, fp, Operand(x2, LSL, kPointerSizeLog2));
+  __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ SmiTag(x2);
+
+  // x1 : function
+  // x2 : number of parameters (tagged)
+  // x3 : parameters pointer
+  //
+  // Returns pointer to result object in x0.
+
+  // Make an untagged copy of the parameter count.
+  // Note: arg_count_smi is an alias of param_count_smi.
+  Register function = x1;
+  Register arg_count_smi = x2;
+  Register param_count_smi = x2;
+  Register recv_arg = x3;
+  Register param_count = x7;
+  __ SmiUntag(param_count, param_count_smi);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Register caller_fp = x11;
+  Register caller_ctx = x12;
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(caller_ctx, MemOperand(caller_fp,
+                                StandardFrameConstants::kContextOffset));
+  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ B(eq, &adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+
+  //   x1   function      function pointer
+  //   x2   arg_count_smi number of function arguments (smi)
+  //   x3   recv_arg      pointer to receiver arguments
+  //   x4   mapped_params number of mapped params, min(params, args) (uninit)
+  //   x7   param_count   number of function parameters
+  //   x11  caller_fp     caller's frame pointer
+  //   x14  arg_count     number of function arguments (uninit)
+
+  Register arg_count = x14;
+  Register mapped_params = x4;
+  __ Mov(arg_count, param_count);
+  __ Mov(mapped_params, param_count);
+  __ B(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ Bind(&adaptor_frame);
+  __ Ldr(arg_count_smi,
+         MemOperand(caller_fp,
+                    ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(arg_count, arg_count_smi);
+  __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+  __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+  // Compute the mapped parameter count = min(param_count, arg_count)
+  __ Cmp(param_count, arg_count);
+  __ Csel(mapped_params, param_count, arg_count, lt);
+
+  __ Bind(&try_allocate);
+
+  //   x0   alloc_obj     pointer to allocated objects: param map, backing
+  //                      store, arguments (uninit)
+  //   x1   function      function pointer
+  //   x2   arg_count_smi number of function arguments (smi)
+  //   x3   recv_arg      pointer to receiver arguments
+  //   x4   mapped_params number of mapped parameters, min(params, args)
+  //   x7   param_count   number of function parameters
+  //   x10  size          size of objects to allocate (uninit)
+  //   x14  arg_count     number of function arguments
+
+  // Compute the size of backing store, parameter map, and arguments object.
+  // 1. The parameter map has two extra words containing the context and the
+  // backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
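+  // For illustration (hypothetical values): with kPointerSize == 8, two
+  // mapped parameters and three arguments, the single allocation below is
+  //   parameter map:  FixedArray::kHeaderSize + 2 * 8 + 2 * 8 bytes,
+  //   backing store:  FixedArray::kHeaderSize + 3 * 8 bytes,
+  //   arguments obj.: JSSloppyArgumentsObject::kSize bytes.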
+
+  // Calculate the parameter map size, assuming it exists.
+  Register size = x10;
+  __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+  __ Add(size, size, kParameterMapHeaderSize);
+
+  // If there are no mapped parameters, set the running size total to zero.
+  // Otherwise, use the parameter map size calculated earlier.
+  __ Cmp(mapped_params, 0);
+  __ CzeroX(size, eq);
+
+  // 2. Add the size of the backing store and arguments object.
+  __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+  __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
+
+  // Do the allocation of all three objects in one go. Assign this to x0, as it
+  // will be returned to the caller.
+  Register alloc_obj = x0;
+  __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+  // Get the arguments boilerplate from the current (global) context.
+
+  //   x0   alloc_obj       pointer to allocated objects (param map, backing
+  //                        store, arguments)
+  //   x1   function        function pointer
+  //   x2   arg_count_smi   number of function arguments (smi)
+  //   x3   recv_arg        pointer to receiver arguments
+  //   x4   mapped_params   number of mapped parameters, min(params, args)
+  //   x7   param_count     number of function parameters
+  //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
+  //   x14  arg_count       number of function arguments
+
+  Register global_ctx = x10;
+  Register sloppy_args_map = x11;
+  Register aliased_args_map = x10;
+  __ Ldr(global_ctx, NativeContextMemOperand());
+
+  __ Ldr(sloppy_args_map,
+         ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+  __ Ldr(
+      aliased_args_map,
+      ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
+  __ Cmp(mapped_params, 0);
+  __ CmovX(sloppy_args_map, aliased_args_map, ne);
+
+  // Copy the JS object part.
+  __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+  __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(function);
+  __ Str(function,
+         FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
+
+  // Use the length and set that as an in-object property.
+  __ Str(arg_count_smi,
+         FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, "elements" will point there, otherwise
+  // it will point to the backing store.
+
+  //   x0   alloc_obj     pointer to allocated objects (param map, backing
+  //                      store, arguments)
+  //   x1   function      function pointer
+  //   x2   arg_count_smi number of function arguments (smi)
+  //   x3   recv_arg      pointer to receiver arguments
+  //   x4   mapped_params number of mapped parameters, min(params, args)
+  //   x5   elements      pointer to parameter map or backing store (uninit)
+  //   x6   backing_store pointer to backing store (uninit)
+  //   x7   param_count   number of function parameters
+  //   x14  arg_count     number of function arguments
+
+  Register elements = x5;
+  __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
+  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ Cmp(mapped_params, 0);
+  // Set up backing store address, because it is needed later for filling in
+  // the unmapped arguments.
+  Register backing_store = x6;
+  __ CmovX(backing_store, elements, eq);
+  __ B(eq, &skip_parameter_map);
+
+  __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+  __ Add(x10, mapped_params, 2);
+  __ SmiTag(x10);
+  __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Str(cp, FieldMemOperand(elements,
+                             FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+  __ Add(x10, x10, kParameterMapHeaderSize);
+  __ Str(x10, FieldMemOperand(elements,
+                              FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. Then index the context,
+  // where parameters are stored in reverse order, at:
+  //
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+  //
+  // The mapped parameter thus needs to get indices:
+  //
+  //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+  //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+  //
+  // We loop from right to left.
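+  //
+  // For example, assuming MIN_CONTEXT_SLOTS == 4, parameter_count == 3 and
+  // mapped_parameter_count == 2, the two map slots receive the context
+  // indices 6 and 5, in that order.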
+
+  //   x0   alloc_obj     pointer to allocated objects (param map, backing
+  //                      store, arguments)
+  //   x1   function      function pointer
+  //   x2   arg_count_smi number of function arguments (smi)
+  //   x3   recv_arg      pointer to receiver arguments
+  //   x4   mapped_params number of mapped parameters, min(params, args)
+  //   x5   elements      pointer to parameter map or backing store (uninit)
+  //   x6   backing_store pointer to backing store (uninit)
+  //   x7   param_count   number of function parameters
+  //   x11  loop_count    parameter loop counter (uninit)
+  //   x12  index         parameter index (smi, uninit)
+  //   x13  the_hole      hole value (uninit)
+  //   x14  arg_count     number of function arguments
+
+  Register loop_count = x11;
+  Register index = x12;
+  Register the_hole = x13;
+  Label parameters_loop, parameters_test;
+  __ Mov(loop_count, mapped_params);
+  __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+  __ Sub(index, index, mapped_params);
+  __ SmiTag(index);
+  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+  __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+  __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+  __ B(&parameters_test);
+
+  __ Bind(&parameters_loop);
+  __ Sub(loop_count, loop_count, 1);
+  __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+  __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+  __ Str(index, MemOperand(elements, x10));
+  __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+  __ Str(the_hole, MemOperand(backing_store, x10));
+  __ Add(index, index, Smi::FromInt(1));
+  __ Bind(&parameters_test);
+  __ Cbnz(loop_count, &parameters_loop);
+
+  __ Bind(&skip_parameter_map);
+  // Copy the arguments header and remaining slots (if there are any).
+  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+  __ Str(arg_count_smi, FieldMemOperand(backing_store,
+                                        FixedArray::kLengthOffset));
+
+  //   x0   alloc_obj     pointer to allocated objects (param map, backing
+  //                      store, arguments)
+  //   x1   function      function pointer
+  //   x2   arg_count_smi number of function arguments (smi)
+  //   x3   recv_arg      pointer to receiver arguments
+  //   x4   mapped_params number of mapped parameters, min(params, args)
+  //   x6   backing_store pointer to backing store (uninit)
+  //   x14  arg_count     number of function arguments
+
+  Label arguments_loop, arguments_test;
+  __ Mov(x10, mapped_params);
+  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+  __ B(&arguments_test);
+
+  __ Bind(&arguments_loop);
+  __ Sub(recv_arg, recv_arg, kPointerSize);
+  __ Ldr(x11, MemOperand(recv_arg));
+  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+  __ Add(x10, x10, 1);
+
+  __ Bind(&arguments_test);
+  __ Cmp(x10, arg_count);
+  __ B(lt, &arguments_loop);
+
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  __ Bind(&runtime);
+  __ Push(function, recv_arg, arg_count_smi);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(x1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make x2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Mov(x2, fp);
+    __ B(&loop_entry);
+    __ Bind(&loop);
+    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+    __ Bind(&loop_entry);
+    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+    __ Cmp(x3, x1);
+    __ B(ne, &loop);
+  }
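+  // As in FastNewRestParameterStub above, the marker slot holds the function
+  // itself only in the JavaScript frame we are looking for.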
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
+  __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ B(eq, &arguments_adaptor);
+  {
+    __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldrsw(x0, FieldMemOperand(
+                     x1, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
+    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+  }
+  __ B(&arguments_done);
+  __ Bind(&arguments_adaptor);
+  {
+    __ Ldrsw(x0, UntagSmiMemOperand(
+                     x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
+    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+  }
+  __ Bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- x0 : number of rest parameters
+  //  -- x2 : pointer to the first rest parameter
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
+  __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
+  __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+  __ Bind(&done_allocate);
+
+  // Compute arguments.length in x6.
+  __ SmiTag(x6, x0);
+
+  // Set up the elements array in x3.
+  __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
+  __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
+  __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
+  __ Add(x4, x3, FixedArray::kHeaderSize);
+  {
+    Label loop, done_loop;
+    __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
+    __ Bind(&loop);
+    __ Cmp(x4, x0);
+    __ B(eq, &done_loop);
+    __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
+    __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
+    __ Sub(x2, x2, Operand(1 * kPointerSize));
+    __ Add(x4, x4, Operand(1 * kPointerSize));
+    __ B(&loop);
+    __ Bind(&done_loop);
+  }
+
+  // Set up the strict arguments object in x0.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
+  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
+  __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
+  __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ Bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(x0);
+    __ SmiTag(x1);
+    __ Push(x0, x2, x1);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Mov(x3, x0);
+    __ Pop(x2, x0);
+    __ SmiUntag(x0);
+  }
+  __ B(&done_allocate);
+}
+
+
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context = cp;
   Register result = x0;
@@ -5656,11 +5804,10 @@
   __ B(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- x0                  : callee
   //  -- x4                  : call_data
@@ -5697,8 +5844,10 @@
   // FunctionCallbackArguments: context, callee and call data.
   __ Push(context, callee, call_data);
 
-  // Load context from callee
-  __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // Load the context from the callee.
+    __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
 
   if (!call_data_undefined) {
     __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
@@ -5783,7 +5932,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5791,24 +5940,29 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- sp[0]                  : name
-  //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
+  //  -- sp[0]                         : name
+  //  -- sp[8 .. (8 + kArgsLength*8)]  : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- x2                     : api_function_address
+  //  -- x2                            : api_function_address
   // -----------------------------------
 
   Register api_function_address = ApiGetterDescriptor::function_address();
   DCHECK(api_function_address.is(x2));
 
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
   __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
-  __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA
+  __ Add(x1, x0, 1 * kPointerSize);  // x1 = v8::PCI::args_
 
   const int kApiStackSpace = 1;
 
@@ -5819,20 +5973,22 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // x1 (internal::Object** args_) as the data.
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
   __ Poke(x1, 1 * kPointerSize);
-  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);  // x1 = AccessorInfo&
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
+  // x1 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
   const int spill_offset = 1 + kApiStackSpace;
+  // +3 skips the prologue, the return address and the name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
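+  // When PropertyCallbackArguments::kReturnValueOffset == 3 this is the
+  // fp-relative slot 6, matching the operand previously hard-coded here.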
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            kStackUnwindSpace, NULL, spill_offset,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           return_value_operand, NULL);
 }
 
 
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index cf2cc57..37bb4a2 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -19,8 +19,8 @@
     cache_type_register_ = 0;
 #else
     // Copy the content of the cache type register to a core register.
-    __asm__ __volatile__ ("mrs %[ctr], ctr_el0"  // NOLINT
-                          : [ctr] "=r" (cache_type_register_));
+    __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT
+                         : [ctr] "=r"(cache_type_register_));
 #endif
   }
 
@@ -37,7 +37,6 @@
   uint32_t cache_type_register_;
 };
 
-
 void CpuFeatures::FlushICache(void* address, size_t length) {
 #ifdef V8_HOST_ARCH_ARM64
   // The code below assumes user space cache operations are allowed. The goal
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index 118c5df..3aa1e4d 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -65,30 +65,7 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-  for (int i = 0; i < Register::NumRegisters(); i++) {
-    input_->SetRegister(i, 0);
-  }
-
-  // TODO(all): Do we also need to set a value to csp?
-  input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
-  }
-}
-
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on ARM64 in the input frame.
   return false;
 }
@@ -191,11 +168,13 @@
   }
 
   // Copy FP registers to the input frame.
+  CPURegList copy_fp_to_input = saved_fp_registers;
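+  // The set of saved FP registers need not be contiguous, so pop each
+  // register off this copy of the list and index the destination slot by the
+  // register code rather than by the loop counter.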
   for (int i = 0; i < saved_fp_registers.Count(); i++) {
-    int dst_offset = FrameDescription::double_registers_offset() +
-        (i * kDoubleSize);
     int src_offset = kFPRegistersOffset + (i * kDoubleSize);
     __ Peek(x2, src_offset);
+    CPURegister reg = copy_fp_to_input.PopLowestIndex();
+    int dst_offset = FrameDescription::double_registers_offset() +
+                     (reg.code() * kDoubleSize);
     __ Str(x2, MemOperand(x1, dst_offset));
   }
 
@@ -264,11 +243,11 @@
   DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
          !saved_fp_registers.IncludesAliasOf(fp_zero) &&
          !saved_fp_registers.IncludesAliasOf(fp_scratch));
-  int src_offset = FrameDescription::double_registers_offset();
   while (!saved_fp_registers.IsEmpty()) {
     const CPURegister reg = saved_fp_registers.PopLowestIndex();
+    int src_offset = FrameDescription::double_registers_offset() +
+                     (reg.code() * kDoubleSize);
     __ Ldr(reg, MemOperand(x1, src_offset));
-    src_offset += kDoubleSize;
   }
 
   // Push state from the last output frame.
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index 485aa78..c6ae37e 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -56,20 +56,6 @@
 const Register StringCompareDescriptor::RightRegister() { return x0; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return x1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return x1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return x2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return x3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; }
-
-
 const Register ApiGetterDescriptor::function_address() { return x2; }
 
 
@@ -98,6 +84,35 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {x1, x3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // x1: function
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // x1: function
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // x1: function
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -116,6 +131,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return x0; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
 
 
@@ -185,13 +204,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {x3, x0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // x1  function    the function to call
@@ -465,6 +477,14 @@
                                    &default_descriptor);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -476,7 +496,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -488,7 +507,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index fbf459d..953c3fd 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1488,18 +1488,15 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register object,
-                                    Register null_value,
-                                    Register scratch0,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    Register scratch3,
+void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
+                                    Register scratch1, Register scratch2,
+                                    Register scratch3, Register scratch4,
                                     Label* call_runtime) {
-  DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
-                     scratch3));
+  DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
 
   Register empty_fixed_array_value = scratch0;
   Register current_object = scratch1;
+  Register null_value = scratch4;
 
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Label next, start;
@@ -1516,6 +1513,7 @@
   Cmp(enum_length, kInvalidEnumCacheSentinel);
   B(eq, call_runtime);
 
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
   B(&start);
 
   Bind(&next);
@@ -1576,10 +1574,9 @@
                                 Label* branch) {
   DCHECK(cond == eq || cond == ne);
   UseScratchRegisterScope temps(this);
-  Register temp = temps.AcquireX();
-  And(temp, object, ExternalReference::new_space_mask(isolate()));
-  Cmp(temp, ExternalReference::new_space_start(isolate()));
-  B(cond, branch);
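+  // New-space pages are marked with the IN_FROM_SPACE / IN_TO_SPACE page
+  // flags, so a page-flag test replaces the old address-range comparison
+  // against new_space_start.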
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
 }
 
 
@@ -1641,6 +1638,20 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
+
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
+    Check(hs, kOperandIsNotAReceiver);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
   if (emit_debug_code()) {
@@ -1679,6 +1690,15 @@
   }
 }
 
+void MacroAssembler::AssertNumber(Register value) {
+  if (emit_debug_code()) {
+    Label done;
+    JumpIfSmi(value, &done);
+    JumpIfHeapNumber(value, &done);
+    Abort(kOperandIsNotANumber);
+    Bind(&done);
+  }
+}
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
@@ -1727,19 +1747,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  ASM_LOCATION("MacroAssembler::InvokeBuiltin");
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, x1);
-  InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
   const Runtime::Function* function = Runtime::FunctionForId(fid);
   DCHECK_EQ(1, function->result_size);
@@ -2423,7 +2430,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -3824,6 +3831,65 @@
   Ldr(result, FieldMemOperand(scratch2, kValueOffset));
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(x1));
+  DCHECK(code_entry.is(x7));
+  DCHECK(scratch.is(x5));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    Add(scratch, js_function, offset - kHeapObjectTag);
+    Ldr(temp, MemOperand(scratch));
+    Cmp(temp, code_entry);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlagClear(code_entry, scratch,
+                     MemoryChunk::kPointersToHereAreInterestingMask, &done);
+  CheckPageFlagClear(js_function, scratch,
+                     MemoryChunk::kPointersFromHereAreInterestingMask, &done);
+
+  const Register dst = scratch;
+  Add(dst, js_function, offset - kHeapObjectTag);
+
+  // Save caller-saved registers. Both input registers (x1 and x7) are
+  // caller-saved, so there is no need to push them.
+  PushCPURegList(kCallerSaved);
+
+  int argument_count = 3;
+
+  Mov(x0, js_function);
+  Mov(x1, dst);
+  Mov(x2, ExternalReference::isolate_address(isolate()));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCPURegList(kCallerSaved);
+
+  Bind(&done);
+}
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address,
@@ -3938,6 +4004,17 @@
   }
 }
 
+void MacroAssembler::CheckPageFlag(const Register& object,
+                                   const Register& scratch, int mask,
+                                   Condition cc, Label* condition_met) {
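+  // Clearing the page-offset bits of the object address yields the
+  // MemoryChunk header; its flags word is then tested against the mask.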
+  And(scratch, object, ~Page::kPageAlignmentMask);
+  Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  if (cc == eq) {
+    TestAndBranchIfAnySet(scratch, mask, condition_met);
+  } else {
+    TestAndBranchIfAllClear(scratch, mask, condition_met);
+  }
+}
 
 void MacroAssembler::CheckPageFlagSet(const Register& object,
                                       const Register& scratch,
@@ -4409,9 +4486,9 @@
       // We don't actually want to generate a pile of code for this, so just
       // claim there is a stack frame, without generating one.
       FrameScope scope(this, StackFrame::NONE);
-      CallRuntime(Runtime::kAbort, 1);
+      CallRuntime(Runtime::kAbort);
     } else {
-      CallRuntime(Runtime::kAbort, 1);
+      CallRuntime(Runtime::kAbort);
     }
   } else {
     // Load the string to pass to Printf.
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 78997d6..ff41c4f 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -34,9 +34,9 @@
 namespace internal {
 
 // Give alias names to registers for calling conventions.
-// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
 #define kReturnRegister0 x0
 #define kReturnRegister1 x1
+#define kReturnRegister2 x2
 #define kJSFunctionRegister x1
 #define kContextRegister cp
 #define kInterpreterAccumulatorRegister x0
@@ -970,6 +970,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -981,6 +984,9 @@
   // --debug-code.
   void AssertPositiveOrZero(Register value);
 
+  // Abort execution if argument is not a number (heap number or smi).
+  void AssertNumber(Register value);
+
   void JumpIfHeapNumber(Register object, Label* on_heap_number,
                         SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
   void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1138,10 +1144,6 @@
                              int num_arguments);
 
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   void Jump(Register target);
   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -1586,12 +1588,8 @@
   void LeaveFrame(StackFrame::Type type);
 
   // Returns map with validated enum cache in object register.
-  void CheckEnumCache(Register object,
-                      Register null_value,
-                      Register scratch0,
-                      Register scratch1,
-                      Register scratch2,
-                      Register scratch3,
+  void CheckEnumCache(Register object, Register scratch0, Register scratch1,
+                      Register scratch2, Register scratch3, Register scratch4,
                       Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
@@ -1730,6 +1728,9 @@
     Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
   }
 
+  void CheckPageFlag(const Register& object, const Register& scratch, int mask,
+                     Condition cc, Label* condition_met);
+
   void CheckPageFlagSet(const Register& object,
                         const Register& scratch,
                         int mask,
@@ -1793,6 +1794,11 @@
                      pointers_to_here_check_for_value);
   }
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(
       Register object,
       Register map,
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 8f72669..81dbdf8 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -15,6 +15,7 @@
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
 #include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
 
 namespace v8 {
 namespace internal {
@@ -533,12 +534,6 @@
 // uses the ObjectPair structure.
 // The simulator assumes all runtime calls return two 64-bit values. If they
 // don't, register x1 is clobbered. This is fine because x1 is caller-saved.
-struct ObjectPair {
-  int64_t res0;
-  int64_t res1;
-};
-
-
 typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
                                            int64_t arg1,
                                            int64_t arg2,
@@ -548,6 +543,11 @@
                                            int64_t arg6,
                                            int64_t arg7);
 
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
+                                                   int64_t arg2, int64_t arg3,
+                                                   int64_t arg4, int64_t arg5,
+                                                   int64_t arg6, int64_t arg7);
+
 typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
 typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
 typedef double (*SimulatorRuntimeFPCall)(double arg1);
@@ -589,8 +589,10 @@
       UNREACHABLE();
       break;
 
-    case ExternalReference::BUILTIN_CALL: {
-      // Object* f(v8::internal::Arguments).
+    case ExternalReference::BUILTIN_CALL:
+    case ExternalReference::BUILTIN_CALL_PAIR: {
+      // Object* f(v8::internal::Arguments) or
+      // ObjectPair f(v8::internal::Arguments).
       TraceSim("Type: BUILTIN_CALL\n");
       SimulatorRuntimeCall target =
         reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -607,13 +609,41 @@
                xreg(4), xreg(5), xreg(6), xreg(7));
       ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
                                  xreg(4), xreg(5), xreg(6), xreg(7));
-      TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
-               result.res0, result.res1);
+      TraceSim("Returned: {%p, %p}\n", result.x, result.y);
 #ifdef DEBUG
       CorruptAllCallerSavedCPURegisters();
 #endif
-      set_xreg(0, result.res0);
-      set_xreg(1, result.res1);
+      set_xreg(0, reinterpret_cast<int64_t>(result.x));
+      set_xreg(1, reinterpret_cast<int64_t>(result.y));
+      break;
+    }
+
+    case ExternalReference::BUILTIN_CALL_TRIPLE: {
+      // ObjectTriple f(v8::internal::Arguments).
+      TraceSim("Type: BUILTIN_CALL TRIPLE\n");
+      SimulatorRuntimeTripleCall target =
+          reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+
+      // We don't know how many arguments are being passed, but we can
+      // pass 8 without touching the stack. They will be ignored by the
+      // host function if they aren't used.
+      TraceSim(
+          "Arguments: "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+          "0x%016" PRIx64 ", 0x%016" PRIx64,
+          xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
+          xreg(7));
+      // Return location passed in x8.
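+      // (On AArch64, x8 is the PCS indirect result location register, used
+      // for return values too large for the x0/x1 pair.)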
+      ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
+      ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
+                                   xreg(5), xreg(6), xreg(7));
+      TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
+#ifdef DEBUG
+      CorruptAllCallerSavedCPURegisters();
+#endif
+      *sim_result = result;
       break;
     }
 
@@ -1966,10 +1996,10 @@
 
   switch (instr->Mask(DataProcessing1SourceMask)) {
     case RBIT_w:
-      set_wreg(dst, ReverseBits(wreg(src)));
+      set_wreg(dst, base::bits::ReverseBits(wreg(src)));
       break;
     case RBIT_x:
-      set_xreg(dst, ReverseBits(xreg(src)));
+      set_xreg(dst, base::bits::ReverseBits(xreg(src)));
       break;
     case REV16_w:
       set_wreg(dst, ReverseBytes(wreg(src), 1));
@@ -3510,7 +3540,8 @@
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int64_t value = *cur;
           Heap* current_heap = isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & kSmiTagMask) == 0) {
               STATIC_ASSERT(kSmiValueSize == 32);
diff --git a/src/arm64/utils-arm64.h b/src/arm64/utils-arm64.h
index 1e1c0a3..35d9824 100644
--- a/src/arm64/utils-arm64.h
+++ b/src/arm64/utils-arm64.h
@@ -55,19 +55,6 @@
 
 
 template <typename T>
-T ReverseBits(T value) {
-  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
-         (sizeof(value) == 8));
-  T result = 0;
-  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
-    result = (result << 1) | (value & 1);
-    value >>= 1;
-  }
-  return result;
-}
-
-
-template <typename T>
 T ReverseBytes(T value, int block_bytes_log2) {
   DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
   DCHECK((1U << block_bytes_log2) <= sizeof(value));
diff --git a/src/assembler.cc b/src/assembler.cc
index 4aac08d..5c8c2ce 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -34,6 +34,7 @@
 
 #include "src/assembler.h"
 
+#include <math.h>
 #include <cmath>
 #include "src/api.h"
 #include "src/base/cpu.h"
@@ -50,8 +51,8 @@
 #include "src/execution.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interpreter/interpreter.h"
 #include "src/ostreams.h"
-#include "src/parsing/token.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-macro-assembler.h"
@@ -265,8 +266,8 @@
 
 bool CpuFeatures::initialized_ = false;
 unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::cache_line_size_ = 0;
-
+unsigned CpuFeatures::icache_line_size_ = 0;
+unsigned CpuFeatures::dcache_line_size_ = 0;
 
 // -----------------------------------------------------------------------------
 // Implementation of Label
@@ -770,6 +771,9 @@
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
 
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+  return DebugCodegen::DebugBreakSlotIsPatched(pc_);
+}
 
 #ifdef DEBUG
 bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
@@ -943,6 +947,20 @@
 
 // Implementation of ExternalReference
 
+static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
+  switch (result_size) {
+    case 1:
+      return ExternalReference::BUILTIN_CALL;
+    case 2:
+      return ExternalReference::BUILTIN_CALL_PAIR;
+    case 3:
+      return ExternalReference::BUILTIN_CALL_TRIPLE;
+  }
+  UNREACHABLE();
+  return ExternalReference::BUILTIN_CALL;
+}
+
+
 void ExternalReference::SetUp() {
   double_constants.min_int = kMinInt;
   double_constants.one_half = 0.5;
@@ -1025,18 +1043,23 @@
 
 
 ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
-    : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
+    : ExternalReference(Runtime::FunctionForId(id), isolate) {}
 
 
 ExternalReference::ExternalReference(const Runtime::Function* f,
                                      Isolate* isolate)
-    : address_(Redirect(isolate, f->entry)) {}
+    : address_(Redirect(isolate, f->entry,
+                        BuiltinCallTypeForResultSize(f->result_size))) {}
 
 
 ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
   return ExternalReference(isolate);
 }
 
+ExternalReference ExternalReference::interpreter_dispatch_table_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->interpreter()->dispatch_table_address());
+}
 
 ExternalReference::ExternalReference(StatsCounter* counter)
   : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
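
Taken together, the hunks above mean an ExternalReference built from a
Runtime::FunctionId now delegates to the Runtime::Function* constructor, which
selects the redirection type from the function's result_size. A hedged sketch of
the flow (`some_id` is a stand-in for any Runtime::FunctionId):

    const Runtime::Function* f = Runtime::FunctionForId(some_id);
    // result_size 1 -> BUILTIN_CALL, 2 -> BUILTIN_CALL_PAIR,
    // 3 -> BUILTIN_CALL_TRIPLE (see BuiltinCallTypeForResultSize above);
    // a simulator uses the type to know how many result registers to
    // read back after the redirected C call.
    ExternalReference ref(f, isolate);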
@@ -1057,9 +1080,16 @@
       FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
 }
 
+ExternalReference
+ExternalReference::incremental_marking_record_write_code_entry_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteOfCodeEntryFromCode)));
+}
 
-ExternalReference ExternalReference::
-    store_buffer_overflow_function(Isolate* isolate) {
+ExternalReference ExternalReference::store_buffer_overflow_function(
+    Isolate* isolate) {
   return ExternalReference(Redirect(
       isolate,
       FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
@@ -1117,6 +1147,67 @@
       Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
 }
 
+static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+ExternalReference ExternalReference::f32_trunc_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_trunc_wrapper)));
+}
+
+static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+ExternalReference ExternalReference::f32_floor_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_floor_wrapper)));
+}
+
+static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+ExternalReference ExternalReference::f32_ceil_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_ceil_wrapper)));
+}
+
+static void f32_nearest_int_wrapper(float* param) {
+  *param = nearbyintf(*param);
+}
+
+ExternalReference ExternalReference::f32_nearest_int_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(f32_nearest_int_wrapper)));
+}
+
+static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+
+ExternalReference ExternalReference::f64_trunc_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_trunc_wrapper)));
+}
+
+static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+
+ExternalReference ExternalReference::f64_floor_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_floor_wrapper)));
+}
+
+static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+
+ExternalReference ExternalReference::f64_ceil_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_ceil_wrapper)));
+}
+
+static void f64_nearest_int_wrapper(double* param) {
+  *param = nearbyint(*param);
+}
+
+ExternalReference ExternalReference::f64_nearest_int_wrapper_function(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(f64_nearest_int_wrapper)));
+}
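
All eight wrappers above share one shape: a single in/out pointer parameter, so
the redirected C call needs no floating-point register marshalling. A sketch of
the convention (direct calls shown only for illustration; real callers reach
these through the ExternalReference accessors):

    double x = 2.75;
    f64_trunc_wrapper(&x);   // x is now 2.0
    float y = -1.25f;
    f32_ceil_wrapper(&y);    // y is now -1.0f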
 
 ExternalReference ExternalReference::log_enter_external_function(
     Isolate* isolate) {
@@ -1182,12 +1273,6 @@
 }
 
 
-ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
-  return ExternalReference(reinterpret_cast<Address>(
-      isolate->heap()->NewSpaceMask()));
-}
-
-
 ExternalReference ExternalReference::new_space_allocation_top_address(
     Isolate* isolate) {
   return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
@@ -1521,23 +1606,6 @@
 }
 
 
-bool EvalComparison(Token::Value op, double op1, double op2) {
-  DCHECK(Token::IsCompareOp(op));
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT: return (op1 == op2);
-    case Token::NE: return (op1 != op2);
-    case Token::LT: return (op1 < op2);
-    case Token::GT: return (op1 > op2);
-    case Token::LTE: return (op1 <= op2);
-    case Token::GTE: return (op1 >= op2);
-    default:
-      UNREACHABLE();
-      return false;
-  }
-}
-
-
 ExternalReference ExternalReference::mod_two_doubles_operation(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
@@ -1837,11 +1905,9 @@
 // Platform specific but identical code for all the platforms.
 
 
-void Assembler::RecordDeoptReason(const int reason,
-                                  const SourcePosition position) {
+void Assembler::RecordDeoptReason(const int reason, int raw_position) {
   if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
     EnsureSpace ensure_space(this);
-    int raw_position = position.IsUnknown() ? 0 : position.raw();
     RecordRelocInfo(RelocInfo::POSITION, raw_position);
     RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
   }
diff --git a/src/assembler.h b/src/assembler.h
index 08c6b38..7bd9ee6 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,7 +38,6 @@
 #include "src/allocation.h"
 #include "src/builtins.h"
 #include "src/isolate.h"
-#include "src/parsing/token.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -49,7 +48,6 @@
 namespace internal {
 
 // Forward declarations.
-class SourcePosition;
 class StatsCounter;
 
 // -----------------------------------------------------------------------------
@@ -225,9 +223,14 @@
 
   static inline bool SupportsCrankshaft();
 
-  static inline unsigned cache_line_size() {
-    DCHECK(cache_line_size_ != 0);
-    return cache_line_size_;
+  static inline unsigned icache_line_size() {
+    DCHECK(icache_line_size_ != 0);
+    return icache_line_size_;
+  }
+
+  static inline unsigned dcache_line_size() {
+    DCHECK(dcache_line_size_ != 0);
+    return dcache_line_size_;
   }
 
   static void PrintTarget();
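
Splitting the single cache_line_size_ into icache/dcache variants matters on
ARM-family cores where the two line sizes can differ: a flush routine must use
each stride for its own cache. A hypothetical sketch, not this patch's flush
code (`start` and `end` bounding the code range are assumptions, and the dc/ic
instructions are shown only as comments):

    unsigned dsize = CpuFeatures::dcache_line_size();
    unsigned isize = CpuFeatures::icache_line_size();
    for (uintptr_t p = start & ~static_cast<uintptr_t>(dsize - 1);
         p < end; p += dsize) {
      // clean the D-cache line to the point of unification ("dc cvau").
    }
    for (uintptr_t p = start & ~static_cast<uintptr_t>(isize - 1);
         p < end; p += isize) {
      // invalidate the I-cache line ("ic ivau"), then barriers as needed.
    }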
@@ -243,7 +246,8 @@
   static void ProbeImpl(bool cross_compile);
 
   static unsigned supported_;
-  static unsigned cache_line_size_;
+  static unsigned icache_line_size_;
+  static unsigned dcache_line_size_;
   static bool initialized_;
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -614,13 +618,9 @@
   template<typename StaticVisitor> inline void Visit(Heap* heap);
   inline void Visit(Isolate* isolate, ObjectVisitor* v);
 
-  // Check whether this return sequence has been patched
-  // with a call to the debugger.
-  INLINE(bool IsPatchedReturnSequence());
-
   // Check whether this debug break slot has been patched with a call to the
   // debugger.
-  INLINE(bool IsPatchedDebugBreakSlotSequence());
+  bool IsPatchedDebugBreakSlotSequence();
 
 #ifdef DEBUG
   // Check whether the given code contains relocation information that
@@ -819,6 +819,14 @@
     // Object* f(v8::internal::Arguments).
     BUILTIN_CALL,  // default
 
+    // Builtin call returning object pair.
+    // ObjectPair f(v8::internal::Arguments).
+    BUILTIN_CALL_PAIR,
+
+    // Builtin call returning object triple.
+    // ObjectTriple f(v8::internal::Arguments).
+    BUILTIN_CALL_TRIPLE,
+
     // Builtin that takes float arguments and returns an int.
     // int f(double, double).
     BUILTIN_COMPARE_CALL,
@@ -885,8 +893,12 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
+
   static ExternalReference incremental_marking_record_write_function(
       Isolate* isolate);
+  static ExternalReference incremental_marking_record_write_code_entry_function(
+      Isolate* isolate);
   static ExternalReference store_buffer_overflow_function(
       Isolate* isolate);
   static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
@@ -901,6 +913,15 @@
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
 
+  static ExternalReference f32_trunc_wrapper_function(Isolate* isolate);
+  static ExternalReference f32_floor_wrapper_function(Isolate* isolate);
+  static ExternalReference f32_ceil_wrapper_function(Isolate* isolate);
+  static ExternalReference f32_nearest_int_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_trunc_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_floor_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_ceil_wrapper_function(Isolate* isolate);
+  static ExternalReference f64_nearest_int_wrapper_function(Isolate* isolate);
+
   // Log support.
   static ExternalReference log_enter_external_function(Isolate* isolate);
   static ExternalReference log_leave_external_function(Isolate* isolate);
@@ -933,7 +954,6 @@
 
   // Static variable Heap::NewSpaceStart()
   static ExternalReference new_space_start(Isolate* isolate);
-  static ExternalReference new_space_mask(Isolate* isolate);
 
   // Write barrier.
   static ExternalReference store_buffer_top(Isolate* isolate);
@@ -1120,8 +1140,6 @@
   return num_bits_set;
 }
 
-bool EvalComparison(Token::Value op, double op1, double op2);
-
 // Computes pow(x, y) with the special cases in the spec for Math.pow.
 double power_helper(Isolate* isolate, double x, double y);
 double power_double_int(double x, int y);
diff --git a/src/ast/ast-expression-rewriter.cc b/src/ast/ast-expression-rewriter.cc
index 49cc7f6..edee91d 100644
--- a/src/ast/ast-expression-rewriter.cc
+++ b/src/ast/ast-expression-rewriter.cc
@@ -398,10 +398,10 @@
 }
 
 
-void AstExpressionRewriter::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstExpressionRewriter::VisitRewritableExpression(
+    RewritableExpression* node) {
   REWRITE_THIS(node);
-  AST_REWRITE_PROPERTY(Expression, node, expression);
+  AST_REWRITE(Expression, node->expression(), node->Rewrite(replacement));
 }
 
 
diff --git a/src/ast/ast-expression-rewriter.h b/src/ast/ast-expression-rewriter.h
index 916842a..1da3fa8 100644
--- a/src/ast/ast-expression-rewriter.h
+++ b/src/ast/ast-expression-rewriter.h
@@ -8,9 +8,7 @@
 #include "src/allocation.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
-#include "src/effects.h"
 #include "src/type-info.h"
-#include "src/types.h"
 #include "src/zone.h"
 
 namespace v8 {
diff --git a/src/ast/ast-expression-visitor.cc b/src/ast/ast-expression-visitor.cc
index 6b2550c..dbf4ea4 100644
--- a/src/ast/ast-expression-visitor.cc
+++ b/src/ast/ast-expression-visitor.cc
@@ -208,6 +208,7 @@
 
 
 void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
+  VisitExpression(expr);
   RECURSE(VisitBlock(expr->block()));
   RECURSE(VisitVariableProxy(expr->result()));
 }
@@ -399,8 +400,8 @@
 }
 
 
-void AstExpressionVisitor::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void AstExpressionVisitor::VisitRewritableExpression(
+    RewritableExpression* expr) {
   VisitExpression(expr);
   RECURSE(Visit(expr->expression()));
 }
diff --git a/src/ast/ast-expression-visitor.h b/src/ast/ast-expression-visitor.h
index cda624d..545a45c 100644
--- a/src/ast/ast-expression-visitor.h
+++ b/src/ast/ast-expression-visitor.h
@@ -8,9 +8,7 @@
 #include "src/allocation.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
-#include "src/effects.h"
 #include "src/type-info.h"
-#include "src/types.h"
 #include "src/zone.h"
 
 namespace v8 {
diff --git a/src/ast/ast-literal-reindexer.cc b/src/ast/ast-literal-reindexer.cc
index fce33e7..1f79b12 100644
--- a/src/ast/ast-literal-reindexer.cc
+++ b/src/ast/ast-literal-reindexer.cc
@@ -44,7 +44,8 @@
 
 
 void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
-  // TODO(caitp): literals in do expressions need re-indexing too.
+  Visit(node->block());
+  Visit(node->result());
 }
 
 
@@ -76,8 +77,8 @@
 }
 
 
-void AstLiteralReindexer::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstLiteralReindexer::VisitRewritableExpression(
+    RewritableExpression* node) {
   Visit(node->expression());
 }
 
@@ -187,6 +188,8 @@
 
 
 void AstLiteralReindexer::VisitSpread(Spread* node) {
+  // This is reachable because ParserBase::ParseArrowFunctionLiteral calls
+  // ReindexLiterals before calling RewriteDestructuringAssignments.
   Visit(node->expression());
 }
 
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index 6c2b696..272f9bd 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -306,7 +306,6 @@
 void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
   IncrementNodeCount();
   DisableOptimization(kTryCatchStatement);
-  node->set_base_id(ReserveIdRange(TryCatchStatement::num_ids()));
   Visit(node->try_block());
   Visit(node->catch_block());
 }
@@ -315,7 +314,6 @@
 void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IncrementNodeCount();
   DisableOptimization(kTryFinallyStatement);
-  node->set_base_id(ReserveIdRange(TryFinallyStatement::num_ids()));
   Visit(node->try_block());
   Visit(node->finally_block());
 }
@@ -372,11 +370,7 @@
 }
 
 
-void AstNumberingVisitor::VisitSpread(Spread* node) {
-  IncrementNodeCount();
-  DisableCrankshaft(kSpread);
-  Visit(node->expression());
-}
+void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
 
 
 void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
@@ -510,6 +504,9 @@
 
 void AstNumberingVisitor::VisitCall(Call* node) {
   IncrementNodeCount();
+  if (node->tail_call_mode() == TailCallMode::kAllow) {
+    DisableOptimization(kTailCall);
+  }
   ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Call::num_ids()));
   Visit(node->expression());
@@ -557,10 +554,10 @@
 }
 
 
-void AstNumberingVisitor::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstNumberingVisitor::VisitRewritableExpression(
+    RewritableExpression* node) {
   IncrementNodeCount();
-  node->set_base_id(ReserveIdRange(RewritableAssignmentExpression::num_ids()));
+  node->set_base_id(ReserveIdRange(RewritableExpression::num_ids()));
   Visit(node->expression());
 }
 
diff --git a/src/ast/ast-value-factory.cc b/src/ast/ast-value-factory.cc
index 2e17fbc..189d4cc 100644
--- a/src/ast/ast-value-factory.cc
+++ b/src/ast/ast-value-factory.cc
@@ -172,6 +172,8 @@
       if (symbol_name_[0] == 'i') {
         DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
         value_ = isolate->factory()->iterator_symbol();
+      } else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
+        value_ = isolate->factory()->has_instance_symbol();
       } else {
         DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
         value_ = isolate->factory()->home_object_symbol();
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index 4ae912e..85e8277 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -255,6 +255,7 @@
   F(dot_catch, ".catch")                        \
   F(empty, "")                                  \
   F(eval, "eval")                               \
+  F(function, "function")                       \
   F(get_space, "get ")                          \
   F(let, "let")                                 \
   F(native, "native")                           \
@@ -263,9 +264,11 @@
   F(proto, "__proto__")                         \
   F(prototype, "prototype")                     \
   F(rest_parameter, ".rest_parameter")          \
+  F(return, "return")                           \
   F(set_space, "set ")                          \
   F(this, "this")                               \
   F(this_function, ".this_function")            \
+  F(throw, "throw")                             \
   F(undefined, "undefined")                     \
   F(use_asm, "use asm")                         \
   F(use_strong, "use strong")                   \
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index 69e7351..9b2c638 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -5,6 +5,8 @@
 #include "src/ast/ast.h"
 
 #include <cmath>  // For isfinite.
+
+#include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
 #include "src/builtins.h"
 #include "src/code-stubs.h"
@@ -32,6 +34,25 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
+#ifdef DEBUG
+
+void AstNode::Print() { Print(Isolate::Current()); }
+
+
+void AstNode::Print(Isolate* isolate) {
+  AstPrinter::PrintOut(isolate, this);
+}
+
+
+void AstNode::PrettyPrint() { PrettyPrint(Isolate::Current()); }
+
+
+void AstNode::PrettyPrint(Isolate* isolate) {
+  PrettyPrinter::PrintOut(isolate, this);
+}
+
+#endif  // DEBUG
+
 
 bool Expression::IsSmiLiteral() const {
   return IsLiteral() && AsLiteral()->value()->IsSmi();
@@ -254,14 +275,21 @@
   }
 }
 
+bool ObjectLiteralProperty::NeedsSetFunctionName() const {
+  return is_computed_name_ &&
+         (value_->IsAnonymousFunctionDefinition() ||
+          (value_->IsFunctionLiteral() &&
+           IsConciseMethod(value_->AsFunctionLiteral()->kind())));
+}
 
 void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
                                              FeedbackVectorSpec* spec,
                                              FeedbackVectorSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
   // ICs must mirror FullCodeGenerator::VisitClassLiteral.
+  prototype_slot_ = spec->AddLoadICSlot();
   if (NeedsProxySlot()) {
-    slot_ = spec->AddStoreICSlot();
+    proxy_slot_ = spec->AddStoreICSlot();
   }
 
   for (int i = 0; i < properties()->length(); i++) {
@@ -476,10 +504,11 @@
 
 
 void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
+  DCHECK_LT(first_spread_index_, 0);
+
   if (!constant_elements_.is_null()) return;
 
-  int constants_length =
-      first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
+  int constants_length = values()->length();
 
   // Allocate a fixed array to hold all the object literals.
   Handle<JSArray> array = isolate->factory()->NewJSArray(
@@ -487,7 +516,7 @@
       Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
 
   // Fill in the literals.
-  bool is_simple = (first_spread_index_ < 0);
+  bool is_simple = true;
   int depth_acc = 1;
   bool is_holey = false;
   int array_index = 0;
@@ -553,7 +582,7 @@
   int array_index = 0;
   for (; array_index < values()->length(); array_index++) {
     Expression* subexpr = values()->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     // We'll reuse the same literal slot for all of the non-constant
@@ -797,14 +826,12 @@
   }
 }
 
-
 CaseClause::CaseClause(Zone* zone, Expression* label,
                        ZoneList<Statement*>* statements, int pos)
     : Expression(zone, pos),
       label_(label),
       statements_(statements),
-      compare_type_(Type::None(zone)) {}
-
+      compare_type_(Type::None()) {}
 
 uint32_t Literal::Hash() {
   return raw_value()->IsString()
diff --git a/src/ast/ast.h b/src/ast/ast.h
index 7f00955..dcb440d 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -91,7 +91,7 @@
   V(CaseClause)                 \
   V(EmptyParentheses)           \
   V(DoExpression)               \
-  V(RewritableAssignmentExpression)
+  V(RewritableExpression)
 
 #define AST_NODE_LIST(V)                        \
   DECLARATION_NODE_LIST(V)                      \
@@ -196,15 +196,18 @@
   virtual NodeType node_type() const = 0;
   int position() const { return position_; }
 
+#ifdef DEBUG
+  void PrettyPrint(Isolate* isolate);
+  void PrettyPrint();
+  void Print(Isolate* isolate);
+  void Print();
+#endif  // DEBUG
+
   // Type testing & conversion functions overridden by concrete subclasses.
 #define DECLARE_NODE_FUNCTIONS(type) \
-  bool Is##type() const { return node_type() == AstNode::k##type; } \
-  type* As##type() { \
-    return Is##type() ? reinterpret_cast<type*>(this) : NULL; \
-  } \
-  const type* As##type() const { \
-    return Is##type() ? reinterpret_cast<const type*>(this) : NULL; \
-  }
+  V8_INLINE bool Is##type() const;   \
+  V8_INLINE type* As##type();        \
+  V8_INLINE const type* As##type() const;
   AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
 #undef DECLARE_NODE_FUNCTIONS
 
@@ -237,7 +240,6 @@
 
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
   virtual bool IsJump() const { return false; }
-  virtual void MarkTail() {}
 };
 
 
@@ -317,6 +319,10 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() const { return false; }
 
+  // True iff the expression is a class or function expression without
+  // a syntactic name.
+  virtual bool IsAnonymousFunctionDefinition() const { return false; }
+
   // True iff the expression is a literal represented as a smi.
   bool IsSmiLiteral() const;
 
@@ -365,14 +371,6 @@
   BailoutId id() const { return BailoutId(local_id(0)); }
   TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
 
-  // Parenthesized expressions in the form `( Expression )`.
-  void set_is_parenthesized() {
-    bit_field_ = ParenthesizedField::update(bit_field_, true);
-  }
-  bool is_parenthesized() const {
-    return ParenthesizedField::decode(bit_field_);
-  }
-
  protected:
   Expression(Zone* zone, int pos)
       : AstNode(pos),
@@ -395,8 +393,6 @@
   int base_id_;
   Bounds bounds_;
   class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
-  class ParenthesizedField
-      : public BitField16<bool, ToBooleanTypesField::kNext, 1> {};
   uint16_t bit_field_;
   // Ends with 16-bit field; deriving classes in turn begin with
   // 16-bit fields for optimum packing efficiency.
@@ -471,10 +467,6 @@
         && labels() == NULL;  // Good enough as an approximation...
   }
 
-  void MarkTail() override {
-    if (!statements_.is_empty()) statements_.last()->MarkTail();
-  }
-
   Scope* scope() const { return scope_; }
   void set_scope(Scope* scope) { scope_ = scope; }
 
@@ -505,8 +497,6 @@
   VariableProxy* result() { return result_; }
   void set_result(VariableProxy* v) { result_ = v; }
 
-  void MarkTail() override { block_->MarkTail(); }
-
  protected:
   DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
       : Expression(zone, pos), block_(block), result_(result) {
@@ -555,24 +545,10 @@
     return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
   }
 
-  bool is_class_declaration() const { return is_class_declaration_; }
-
-  // VariableDeclarations can be grouped into consecutive declaration
-  // groups. Each VariableDeclaration is associated with the start position of
-  // the group it belongs to. The positions are used for strong mode scope
-  // checks for classes and functions.
-  int declaration_group_start() const { return declaration_group_start_; }
-
  protected:
   VariableDeclaration(Zone* zone, VariableProxy* proxy, VariableMode mode,
-                      Scope* scope, int pos, bool is_class_declaration = false,
-                      int declaration_group_start = -1)
-      : Declaration(zone, proxy, mode, scope, pos),
-        is_class_declaration_(is_class_declaration),
-        declaration_group_start_(declaration_group_start) {}
-
-  bool is_class_declaration_;
-  int declaration_group_start_;
+                      Scope* scope, int pos)
+      : Declaration(zone, proxy, mode, scope, pos) {}
 };
 
 
@@ -820,6 +796,10 @@
                                  FeedbackVectorSlotCache* cache) override;
   FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
 
+  static const char* VisitModeString(VisitMode mode) {
+    return mode == ITERATE ? "for-of" : "for-in";
+  }
+
  protected:
   ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
       : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
@@ -857,9 +837,9 @@
 
   static int num_ids() { return parent_num_ids() + 6; }
   BailoutId BodyId() const { return BailoutId(local_id(0)); }
-  BailoutId PrepareId() const { return BailoutId(local_id(1)); }
-  BailoutId EnumId() const { return BailoutId(local_id(2)); }
-  BailoutId ToObjectId() const { return BailoutId(local_id(3)); }
+  BailoutId EnumId() const { return BailoutId(local_id(1)); }
+  BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
+  BailoutId PrepareId() const { return BailoutId(local_id(3)); }
   BailoutId FilterId() const { return BailoutId(local_id(4)); }
   BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
   BailoutId ContinueId() const override { return EntryId(); }
@@ -885,11 +865,13 @@
   void Initialize(Expression* each,
                   Expression* subject,
                   Statement* body,
+                  Variable* iterator,
                   Expression* assign_iterator,
                   Expression* next_result,
                   Expression* result_done,
                   Expression* assign_each) {
     ForEachStatement::Initialize(each, subject, body);
+    iterator_ = iterator;
     assign_iterator_ = assign_iterator;
     next_result_ = next_result;
     result_done_ = result_done;
@@ -900,6 +882,10 @@
     return subject();
   }
 
+  Variable* iterator() const {
+    return iterator_;
+  }
+
   // iterator = subject[Symbol.iterator]()
   Expression* assign_iterator() const {
     return assign_iterator_;
@@ -934,6 +920,7 @@
  protected:
   ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
       : ForEachStatement(zone, labels, pos),
+        iterator_(NULL),
         assign_iterator_(NULL),
         next_result_(NULL),
         result_done_(NULL),
@@ -943,6 +930,7 @@
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
+  Variable* iterator_;
   Expression* assign_iterator_;
   Expression* next_result_;
   Expression* result_done_;
@@ -957,7 +945,6 @@
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
   bool IsJump() const override { return expression_->IsThrow(); }
-  void MarkTail() override { expression_->MarkTail(); }
 
  protected:
   ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -1039,8 +1026,6 @@
   BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
   BailoutId EntryId() const { return BailoutId(local_id(1)); }
 
-  void MarkTail() override { statement_->MarkTail(); }
-
  protected:
   WithStatement(Zone* zone, Scope* scope, Expression* expression,
                 Statement* statement, int pos)
@@ -1083,10 +1068,6 @@
   BailoutId EntryId() const { return BailoutId(local_id(0)); }
   TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
 
-  void MarkTail() override {
-    if (!statements_->is_empty()) statements_->last()->MarkTail();
-  }
-
   Type* compare_type() { return compare_type_; }
   void set_compare_type(Type* type) { compare_type_ = type; }
 
@@ -1119,10 +1100,6 @@
 
   void set_tag(Expression* t) { tag_ = t; }
 
-  void MarkTail() override {
-    if (!cases_->is_empty()) cases_->last()->MarkTail();
-  }
-
  protected:
   SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
       : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
@@ -1160,11 +1137,6 @@
         && HasElseStatement() && else_statement()->IsJump();
   }
 
-  void MarkTail() override {
-    then_statement_->MarkTail();
-    else_statement_->MarkTail();
-  }
-
   void set_base_id(int id) { base_id_ = id; }
   static int num_ids() { return parent_num_ids() + 3; }
   BailoutId IfId() const { return BailoutId(local_id(0)); }
@@ -1201,27 +1173,12 @@
   Block* try_block() const { return try_block_; }
   void set_try_block(Block* b) { try_block_ = b; }
 
-  void set_base_id(int id) { base_id_ = id; }
-  static int num_ids() { return parent_num_ids() + 1; }
-  BailoutId HandlerId() const { return BailoutId(local_id(0)); }
-
  protected:
   TryStatement(Zone* zone, Block* try_block, int pos)
-      : Statement(zone, pos),
-        try_block_(try_block),
-        base_id_(BailoutId::None().ToInt()) {}
-  static int parent_num_ids() { return 0; }
-
-  int base_id() const {
-    DCHECK(!BailoutId(base_id_).IsNone());
-    return base_id_;
-  }
+      : Statement(zone, pos), try_block_(try_block) {}
 
  private:
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
   Block* try_block_;
-  int base_id_;
 };
 
 
@@ -1234,8 +1191,6 @@
   Block* catch_block() const { return catch_block_; }
   void set_catch_block(Block* b) { catch_block_ = b; }
 
-  void MarkTail() override { catch_block_->MarkTail(); }
-
  protected:
   TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
                     Variable* variable, Block* catch_block, int pos)
@@ -1258,8 +1213,6 @@
   Block* finally_block() const { return finally_block_; }
   void set_finally_block(Block* b) { finally_block_ = b; }
 
-  void MarkTail() override { finally_block_->MarkTail(); }
-
  protected:
   TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
                       int pos)
@@ -1472,6 +1425,8 @@
 
   void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
 
+  bool NeedsSetFunctionName() const;
+
  protected:
   friend class AstNodeFactory;
 
@@ -1510,6 +1465,9 @@
   bool may_store_doubles() const { return may_store_doubles_; }
   bool has_function() const { return has_function_; }
   bool has_elements() const { return has_elements_; }
+  bool has_shallow_properties() const {
+    return depth() == 1 && !has_elements() && !may_store_doubles();
+  }
 
   // Decide if a property should be in the object boilerplate.
   static bool IsBoilerplateProperty(Property* property);
@@ -1526,7 +1484,7 @@
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = fast_elements() ? kFastElements : kNoFlags;
     flags |= has_function() ? kHasFunction : kNoFlags;
-    if (depth() == 1 && !has_elements() && !may_store_doubles()) {
+    if (has_shallow_properties()) {
       flags |= kShallowProperties;
     }
     if (disable_mementos) {
@@ -1683,6 +1641,19 @@
     return flags;
   }
 
+  // Provide a mechanism for iterating through values to rewrite spreads.
+  ZoneList<Expression*>::iterator FirstSpread() const {
+    return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_
+                                      : values_->end();
+  }
+  ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
+
+  // Rewind an array literal omitting everything from the first spread on.
+  void RewindSpreads() {
+    values_->Rewind(first_spread_index_);
+    first_spread_index_ = -1;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kShallowElements = 1,
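
FirstSpread/EndValue/RewindSpreads give rewriting phases a way to peel off the
spread suffix of an array literal; a hedged usage sketch (`literal` is some
ArrayLiteral*, and the actual desugaring is elided):

    for (ZoneList<Expression*>::iterator it = literal->FirstSpread();
         it != literal->EndValue(); ++it) {
      Expression* value = *it;
      // ... desugar `value` into explicit element appends ...
    }
    // Drop the suffix so BuildConstantElements only ever sees the simple
    // prefix (it now DCHECKs that first_spread_index_ < 0).
    literal->RewindSpreads();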
@@ -1975,7 +1946,10 @@
     bit_field_ = IsUninitializedField::update(bit_field_, b);
   }
 
-  bool is_tail() const { return IsTailField::decode(bit_field_); }
+  TailCallMode tail_call_mode() const {
+    return IsTailField::decode(bit_field_) ? TailCallMode::kAllow
+                                           : TailCallMode::kDisallow;
+  }
   void MarkTail() override {
     bit_field_ = IsTailField::update(bit_field_, true);
   }
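
The boolean is_tail() accessor becomes a TailCallMode so call sites read
declaratively; the ast-numbering hunk earlier in this patch consumes it as:

    if (node->tail_call_mode() == TailCallMode::kAllow) {
      DisableOptimization(kTailCall);  // see AstNumberingVisitor::VisitCall
    }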
@@ -2349,7 +2323,7 @@
         op_(op),
         left_(left),
         right_(right),
-        combined_type_(Type::None(zone)) {
+        combined_type_(Type::None()) {
     DCHECK(Token::IsCompareOp(op));
   }
   static int parent_num_ids() { return Expression::num_ids(); }
@@ -2372,17 +2346,20 @@
   Expression* expression() const { return expression_; }
   void set_expression(Expression* e) { expression_ = e; }
 
+  int expression_position() const { return expr_pos_; }
+
   static int num_ids() { return parent_num_ids(); }
 
  protected:
-  Spread(Zone* zone, Expression* expression, int pos)
-      : Expression(zone, pos), expression_(expression) {}
+  Spread(Zone* zone, Expression* expression, int pos, int expr_pos)
+      : Expression(zone, pos), expression_(expression), expr_pos_(expr_pos) {}
   static int parent_num_ids() { return Expression::num_ids(); }
 
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   Expression* expression_;
+  int expr_pos_;
 };
 
 
@@ -2505,18 +2482,32 @@
 };
 
 
-class RewritableAssignmentExpression : public Expression {
+// The RewritableExpression class is a wrapper for AST nodes that may be
+// rewritten later.  Even if such nodes are indeed rewritten, the
+// RewritableExpression wrapper nodes survive in the final AST and should
+// simply be ignored, i.e., they should be treated as equivalent to the
+// wrapped nodes.  For this reason, and to simplify later phases,
+// RewritableExpressions are special-cased among AST nodes in the
+// following sense:
+//
+// 1. IsRewritableExpression and AsRewritableExpression behave as usual.
+// 2. All other Is* and As* methods are delegated to the wrapped node,
+//    i.e. IsArrayLiteral() returns true iff the wrapped node is an
+//    array literal.
+//
+// Furthermore, an invariant that must be respected is that the wrapped
+// node is never itself a RewritableExpression.
+class RewritableExpression : public Expression {
  public:
-  DECLARE_NODE_TYPE(RewritableAssignmentExpression)
+  DECLARE_NODE_TYPE(RewritableExpression)
 
-  Expression* expression() { return expr_; }
+  Expression* expression() const { return expr_; }
   bool is_rewritten() const { return is_rewritten_; }
 
-  void set_expression(Expression* e) { expr_ = e; }
-
   void Rewrite(Expression* new_expression) {
     DCHECK(!is_rewritten());
     DCHECK_NOT_NULL(new_expression);
+    DCHECK(!new_expression->IsRewritableExpression());
     expr_ = new_expression;
     is_rewritten_ = true;
   }
@@ -2524,10 +2515,12 @@
   static int num_ids() { return parent_num_ids(); }
 
  protected:
-  RewritableAssignmentExpression(Zone* zone, Expression* expression)
+  RewritableExpression(Zone* zone, Expression* expression)
       : Expression(zone, expression->position()),
         is_rewritten_(false),
-        expr_(expression) {}
+        expr_(expression) {
+    DCHECK(!expression->IsRewritableExpression());
+  }
 
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -2555,26 +2548,6 @@
   void set_generator_object(Expression* e) { generator_object_ = e; }
   void set_expression(Expression* e) { expression_ = e; }
 
-  // Type feedback information.
-  bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache) override {
-    if (HasFeedbackSlots()) {
-      yield_first_feedback_slot_ = spec->AddKeyedLoadICSlot();
-      keyed_load_feedback_slot_ = spec->AddLoadICSlot();
-      done_feedback_slot_ = spec->AddLoadICSlot();
-    }
-  }
-
-  FeedbackVectorSlot KeyedLoadFeedbackSlot() {
-    DCHECK(!HasFeedbackSlots() || !yield_first_feedback_slot_.IsInvalid());
-    return yield_first_feedback_slot_;
-  }
-
-  FeedbackVectorSlot DoneFeedbackSlot() { return keyed_load_feedback_slot_; }
-
-  FeedbackVectorSlot ValueFeedbackSlot() { return done_feedback_slot_; }
-
  protected:
   Yield(Zone* zone, Expression* generator_object, Expression* expression,
         Kind yield_kind, int pos)
@@ -2587,9 +2560,6 @@
   Expression* generator_object_;
   Expression* expression_;
   Kind yield_kind_;
-  FeedbackVectorSlot yield_first_feedback_slot_;
-  FeedbackVectorSlot keyed_load_feedback_slot_;
-  FeedbackVectorSlot done_feedback_slot_;
 };
 
 
@@ -2615,15 +2585,13 @@
     kAnonymousExpression,
     kNamedExpression,
     kDeclaration,
-    kGlobalOrEval
+    kAccessorOrMethod
   };
 
   enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
 
   enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
 
-  enum ArityRestriction { kNormalArity, kGetterArity, kSetterArity };
-
   DECLARE_NODE_TYPE(FunctionLiteral)
 
   Handle<String> name() const { return raw_name_->string(); }
@@ -2636,8 +2604,13 @@
   int start_position() const;
   int end_position() const;
   int SourceSize() const { return end_position() - start_position(); }
-  bool is_expression() const { return IsExpression::decode(bitfield_); }
-  bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
+  bool is_declaration() const { return IsDeclaration::decode(bitfield_); }
+  bool is_named_expression() const {
+    return IsNamedExpression::decode(bitfield_);
+  }
+  bool is_anonymous_expression() const {
+    return IsAnonymousExpression::decode(bitfield_);
+  }
   LanguageMode language_mode() const;
 
   static bool NeedsHomeObject(Expression* expr);
@@ -2729,6 +2702,10 @@
     dont_optimize_reason_ = reason;
   }
 
+  bool IsAnonymousFunctionDefinition() const final {
+    return is_anonymous_expression();
+  }
+
  protected:
   FunctionLiteral(Zone* zone, const AstString* name,
                   AstValueFactory* ast_value_factory, Scope* scope,
@@ -2737,7 +2714,7 @@
                   FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
                   EagerCompileHint eager_compile_hint, FunctionKind kind,
-                  int position)
+                  int position, bool is_function)
       : Expression(zone, position),
         raw_name_(name),
         scope_(scope),
@@ -2750,26 +2727,28 @@
         parameter_count_(parameter_count),
         function_token_position_(RelocInfo::kNoPosition) {
     bitfield_ =
-        IsExpression::encode(function_type != kDeclaration) |
-        IsAnonymous::encode(function_type == kAnonymousExpression) |
+        IsDeclaration::encode(function_type == kDeclaration) |
+        IsNamedExpression::encode(function_type == kNamedExpression) |
+        IsAnonymousExpression::encode(function_type == kAnonymousExpression) |
         Pretenure::encode(false) |
         HasDuplicateParameters::encode(has_duplicate_parameters ==
                                        kHasDuplicateParameters) |
-        IsFunction::encode(function_type != kGlobalOrEval) |
+        IsFunction::encode(is_function) |
         ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
         FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
     DCHECK(IsValidFunctionKind(kind));
   }
 
  private:
-  class IsExpression : public BitField16<bool, 0, 1> {};
-  class IsAnonymous : public BitField16<bool, 1, 1> {};
-  class Pretenure : public BitField16<bool, 2, 1> {};
-  class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
-  class IsFunction : public BitField16<bool, 4, 1> {};
-  class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
-  class FunctionKindBits : public BitField16<FunctionKind, 6, 8> {};
-  class ShouldBeUsedOnceHint : public BitField16<bool, 15, 1> {};
+  class IsDeclaration : public BitField16<bool, 0, 1> {};
+  class IsNamedExpression : public BitField16<bool, 1, 1> {};
+  class IsAnonymousExpression : public BitField16<bool, 2, 1> {};
+  class Pretenure : public BitField16<bool, 3, 1> {};
+  class HasDuplicateParameters : public BitField16<bool, 4, 1> {};
+  class IsFunction : public BitField16<bool, 5, 1> {};
+  class ShouldEagerCompile : public BitField16<bool, 6, 1> {};
+  class ShouldBeUsedOnceHint : public BitField16<bool, 7, 1> {};
+  class FunctionKindBits : public BitField16<FunctionKind, 8, 8> {};
 
   // Start with 16-bit field, which should get packed together
   // with Expression's trailing 16-bit field.
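
The re-packed layout still fits exactly in 16 bits: eight 1-bit flags
(positions 0-7) plus the 8-bit FunctionKindBits (positions 8-15). A minimal
sketch of the encode/decode/update pattern these BitField16 classes provide:

    uint16_t bits =
        IsDeclaration::encode(true) |
        FunctionKindBits::encode(FunctionKind::kNormalFunction);
    bool is_decl = IsDeclaration::decode(bits);        // true
    bits = Pretenure::update(bits, true);              // flip one field
    FunctionKind kind = FunctionKindBits::decode(bits);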
@@ -2796,13 +2775,6 @@
 
   DECLARE_NODE_TYPE(ClassLiteral)
 
-  Handle<String> name() const { return raw_name_->string(); }
-  const AstRawString* raw_name() const { return raw_name_; }
-  void set_raw_name(const AstRawString* name) {
-    DCHECK_NULL(raw_name_);
-    raw_name_ = name;
-  }
-
   Scope* scope() const { return scope_; }
   VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
   Expression* extends() const { return extends_; }
@@ -2817,13 +2789,14 @@
   BailoutId DeclsId() const { return BailoutId(local_id(1)); }
   BailoutId ExitId() { return BailoutId(local_id(2)); }
   BailoutId CreateLiteralId() const { return BailoutId(local_id(3)); }
+  BailoutId PrototypeId() { return BailoutId(local_id(4)); }
 
   // Return an AST id for a property that is used in simulate instructions.
-  BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 4)); }
+  BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 5)); }
 
   // Unlike other AST nodes, the number of bailout IDs allocated for a
   // ClassLiteral can vary, so num_ids() is not a static method.
-  int num_ids() const { return parent_num_ids() + 4 + properties()->length(); }
+  int num_ids() const { return parent_num_ids() + 5 + properties()->length(); }
 
   // Object literals need one feedback slot for each non-trivial value, as well
   // as some slots for home objects.
@@ -2835,15 +2808,19 @@
            class_variable_proxy()->var()->IsUnallocated();
   }
 
-  FeedbackVectorSlot ProxySlot() const { return slot_; }
+  FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
+  FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
+
+  bool IsAnonymousFunctionDefinition() const final {
+    return constructor()->raw_name()->length() == 0;
+  }
 
  protected:
-  ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
-               VariableProxy* class_variable_proxy, Expression* extends,
-               FunctionLiteral* constructor, ZoneList<Property*>* properties,
-               int start_position, int end_position)
+  ClassLiteral(Zone* zone, Scope* scope, VariableProxy* class_variable_proxy,
+               Expression* extends, FunctionLiteral* constructor,
+               ZoneList<Property*>* properties, int start_position,
+               int end_position)
       : Expression(zone, start_position),
-        raw_name_(name),
         scope_(scope),
         class_variable_proxy_(class_variable_proxy),
         extends_(extends),
@@ -2856,14 +2833,14 @@
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  const AstRawString* raw_name_;
   Scope* scope_;
   VariableProxy* class_variable_proxy_;
   Expression* extends_;
   FunctionLiteral* constructor_;
   ZoneList<Property*>* properties_;
   int end_position_;
-  FeedbackVectorSlot slot_;
+  FeedbackVectorSlot prototype_slot_;
+  FeedbackVectorSlot proxy_slot_;
 };
 
 
@@ -3095,12 +3072,11 @@
 
   AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
 
-  VariableDeclaration* NewVariableDeclaration(
-      VariableProxy* proxy, VariableMode mode, Scope* scope, int pos,
-      bool is_class_declaration = false, int declaration_group_start = -1) {
+  VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
+                                              VariableMode mode, Scope* scope,
+                                              int pos) {
     return new (parser_zone_)
-        VariableDeclaration(parser_zone_, proxy, mode, scope, pos,
-                            is_class_declaration, declaration_group_start);
+        VariableDeclaration(parser_zone_, proxy, mode, scope, pos);
   }
 
   FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
@@ -3389,8 +3365,8 @@
         CompareOperation(local_zone_, op, left, right, pos);
   }
 
-  Spread* NewSpread(Expression* expression, int pos) {
-    return new (local_zone_) Spread(local_zone_, expression, pos);
+  Spread* NewSpread(Expression* expression, int pos, int expr_pos) {
+    return new (local_zone_) Spread(local_zone_, expression, pos, expr_pos);
   }
 
   Conditional* NewConditional(Expression* condition,
@@ -3401,12 +3377,9 @@
         local_zone_, condition, then_expression, else_expression, position);
   }
 
-  RewritableAssignmentExpression* NewRewritableAssignmentExpression(
-      Expression* expression) {
+  RewritableExpression* NewRewritableExpression(Expression* expression) {
     DCHECK_NOT_NULL(expression);
-    DCHECK(expression->IsAssignment());
-    return new (local_zone_)
-        RewritableAssignmentExpression(local_zone_, expression);
+    return new (local_zone_) RewritableExpression(local_zone_, expression);
   }
 
   Assignment* NewAssignment(Token::Value op,
@@ -3449,16 +3422,31 @@
         parser_zone_, name, ast_value_factory_, scope, body,
         materialized_literal_count, expected_property_count, parameter_count,
         function_type, has_duplicate_parameters, eager_compile_hint, kind,
-        position);
+        position, true);
   }
 
-  ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
-                                VariableProxy* proxy, Expression* extends,
+  // Creates a FunctionLiteral representing a top-level script, the
+  // result of an eval (top-level or otherwise), or the result of calling
+  // the Function constructor.
+  FunctionLiteral* NewScriptOrEvalFunctionLiteral(
+      Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
+      int expected_property_count) {
+    return new (parser_zone_) FunctionLiteral(
+        parser_zone_, ast_value_factory_->empty_string(), ast_value_factory_,
+        scope, body, materialized_literal_count, expected_property_count, 0,
+        FunctionLiteral::kAnonymousExpression,
+        FunctionLiteral::kNoDuplicateParameters,
+        FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction, 0,
+        false);
+  }
+
+  ClassLiteral* NewClassLiteral(Scope* scope, VariableProxy* proxy,
+                                Expression* extends,
                                 FunctionLiteral* constructor,
                                 ZoneList<ObjectLiteral::Property*>* properties,
                                 int start_position, int end_position) {
     return new (parser_zone_)
-        ClassLiteral(parser_zone_, name, scope, proxy, extends, constructor,
+        ClassLiteral(parser_zone_, scope, proxy, extends, constructor,
                      properties, start_position, end_position);
   }
 
@@ -3529,6 +3517,46 @@
 };
 
 
+// Type testing & conversion functions declared in the AstNode class body,
+// implemented here as inline functions on AstNode.
+
+#define DECLARE_NODE_FUNCTIONS(type)                                          \
+  bool AstNode::Is##type() const {                                            \
+    NodeType mine = node_type();                                              \
+    if (mine == AstNode::kRewritableExpression &&                             \
+        AstNode::k##type != AstNode::kRewritableExpression)                   \
+      mine = reinterpret_cast<const RewritableExpression*>(this)              \
+                 ->expression()                                               \
+                 ->node_type();                                               \
+    return mine == AstNode::k##type;                                          \
+  }                                                                           \
+  type* AstNode::As##type() {                                                 \
+    NodeType mine = node_type();                                              \
+    AstNode* result = this;                                                   \
+    if (mine == AstNode::kRewritableExpression &&                             \
+        AstNode::k##type != AstNode::kRewritableExpression) {                 \
+      result =                                                                \
+          reinterpret_cast<const RewritableExpression*>(this)->expression();  \
+      mine = result->node_type();                                             \
+    }                                                                         \
+    return mine == AstNode::k##type ? reinterpret_cast<type*>(result) : NULL; \
+  }                                                                           \
+  const type* AstNode::As##type() const {                                     \
+    NodeType mine = node_type();                                              \
+    const AstNode* result = this;                                             \
+    if (mine == AstNode::kRewritableExpression &&                             \
+        AstNode::k##type != AstNode::kRewritableExpression) {                 \
+      result =                                                                \
+          reinterpret_cast<const RewritableExpression*>(this)->expression();  \
+      mine = result->node_type();                                             \
+    }                                                                         \
+    return mine == AstNode::k##type ? reinterpret_cast<const type*>(result)   \
+                                    : NULL;                                   \
+  }
+AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+#undef DECLARE_NODE_FUNCTIONS
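
A sketch of the delegation semantics these expansions implement (`factory` is
an AstNodeFactory* and `array` an ArrayLiteral*, both assumptions for
illustration):

    RewritableExpression* wrapper = factory->NewRewritableExpression(array);
    DCHECK(wrapper->IsRewritableExpression());  // the wrapper stays visible,
    DCHECK(wrapper->IsArrayLiteral());          // but Is*/As* see through it.
    DCHECK_EQ(array, wrapper->AsArrayLiteral());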
+
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ast/modules.cc b/src/ast/modules.cc
index 225cd8d..f895756 100644
--- a/src/ast/modules.cc
+++ b/src/ast/modules.cc
@@ -13,7 +13,6 @@
 void ModuleDescriptor::AddLocalExport(const AstRawString* export_name,
                                       const AstRawString* local_name,
                                       Zone* zone, bool* ok) {
-  DCHECK(!IsFrozen());
   void* key = const_cast<AstRawString*>(export_name);
 
   ZoneAllocationPolicy allocator(zone);
diff --git a/src/ast/modules.h b/src/ast/modules.h
index e3c66dc..1fdf526 100644
--- a/src/ast/modules.h
+++ b/src/ast/modules.h
@@ -26,8 +26,7 @@
   // ---------------------------------------------------------------------------
   // Mutators.
 
-  // Add a name to the list of exports. If it already exists, or this descriptor
-  // is frozen, that's an error.
+  // Add a name to the list of exports. If it already exists, that's an error.
   void AddLocalExport(const AstRawString* export_name,
                       const AstRawString* local_name, Zone* zone, bool* ok);
 
@@ -35,30 +34,22 @@
   // if not already present.
   void AddModuleRequest(const AstRawString* module_specifier, Zone* zone);
 
-  // Do not allow any further refinements, directly or through unification.
-  void Freeze() { frozen_ = true; }
-
   // Assign an index.
   void Allocate(int index) {
-    DCHECK(IsFrozen() && index_ == -1);
+    DCHECK_EQ(-1, index_);
     index_ = index;
   }
 
   // ---------------------------------------------------------------------------
   // Accessors.
 
-  // Check whether this is closed (i.e. fully determined).
-  bool IsFrozen() { return frozen_; }
-
   int Length() {
-    DCHECK(IsFrozen());
     ZoneHashMap* exports = exports_;
     return exports ? exports->occupancy() : 0;
   }
 
   // The context slot in the hosting script context pointing to this module.
   int Index() {
-    DCHECK(IsFrozen());
     return index_;
   }
 
@@ -104,12 +95,8 @@
   // Implementation.
  private:
   explicit ModuleDescriptor(Zone* zone)
-      : frozen_(false),
-        exports_(NULL),
-        requested_modules_(1, zone),
-        index_(-1) {}
+      : exports_(NULL), requested_modules_(1, zone), index_(-1) {}
 
-  bool frozen_;
   ZoneHashMap* exports_;   // Module exports and their types (allocated lazily)
   ZoneList<const AstRawString*> requested_modules_;
   int index_;
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index 1f6b8c3..0e9986a 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -412,8 +412,7 @@
 }
 
 
-void CallPrinter::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
   Find(node->expression());
 }
 
@@ -719,7 +718,7 @@
 
 void PrettyPrinter::VisitClassLiteral(ClassLiteral* node) {
   Print("(class ");
-  PrintLiteral(node->name(), false);
+  PrintLiteral(node->constructor()->name(), false);
   if (node->extends()) {
     Print(" extends ");
     Visit(node->extends());
@@ -929,8 +928,7 @@
 }
 
 
-void PrettyPrinter::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void PrettyPrinter::VisitRewritableExpression(RewritableExpression* node) {
   Visit(node->expression());
 }
 
@@ -1203,6 +1201,14 @@
 }
 
 
+void AstPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+  AstPrinter printer(isolate);
+  printer.Init();
+  printer.Visit(node);
+  PrintF("%s", printer.Output());
+}
+
+
 void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
   if (declarations->length() > 0) {
     IndentedScope indent(this, "DECLS");
@@ -1390,6 +1396,10 @@
   PrintIndentedVisit("FOR", node->each());
   PrintIndentedVisit("OF", node->iterable());
   PrintIndentedVisit("BODY", node->body());
+  PrintIndentedVisit("INIT", node->assign_iterator());
+  PrintIndentedVisit("NEXT", node->next_result());
+  PrintIndentedVisit("EACH", node->assign_each());
+  PrintIndentedVisit("DONE", node->result_done());
 }
 
 
@@ -1429,9 +1439,7 @@
 
 void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
   IndentedScope indent(this, "CLASS LITERAL", node->position());
-  if (node->raw_name() != nullptr) {
-    PrintLiteralIndented("NAME", node->name(), false);
-  }
+  PrintLiteralIndented("NAME", node->constructor()->name(), false);
   if (node->extends() != nullptr) {
     PrintIndentedVisit("EXTENDS", node->extends());
   }
@@ -1544,31 +1552,36 @@
 
 
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
-  Variable* var = node->var();
   EmbeddedVector<char, 128> buf;
   int pos =
       FormatSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
 
-  switch (var->location()) {
-    case VariableLocation::UNALLOCATED:
-      break;
-    case VariableLocation::PARAMETER:
-      SNPrintF(buf + pos, " parameter[%d]", var->index());
-      break;
-    case VariableLocation::LOCAL:
-      SNPrintF(buf + pos, " local[%d]", var->index());
-      break;
-    case VariableLocation::CONTEXT:
-      SNPrintF(buf + pos, " context[%d]", var->index());
-      break;
-    case VariableLocation::GLOBAL:
-      SNPrintF(buf + pos, " global[%d]", var->index());
-      break;
-    case VariableLocation::LOOKUP:
-      SNPrintF(buf + pos, " lookup");
-      break;
+  if (!node->is_resolved()) {
+    SNPrintF(buf + pos, " unresolved");
+    PrintLiteralWithModeIndented(buf.start(), nullptr, node->name());
+  } else {
+    Variable* var = node->var();
+    switch (var->location()) {
+      case VariableLocation::UNALLOCATED:
+        break;
+      case VariableLocation::PARAMETER:
+        SNPrintF(buf + pos, " parameter[%d]", var->index());
+        break;
+      case VariableLocation::LOCAL:
+        SNPrintF(buf + pos, " local[%d]", var->index());
+        break;
+      case VariableLocation::CONTEXT:
+        SNPrintF(buf + pos, " context[%d]", var->index());
+        break;
+      case VariableLocation::GLOBAL:
+        SNPrintF(buf + pos, " global[%d]", var->index());
+        break;
+      case VariableLocation::LOOKUP:
+        SNPrintF(buf + pos, " lookup");
+        break;
+    }
+    PrintLiteralWithModeIndented(buf.start(), var, node->name());
   }
-  PrintLiteralWithModeIndented(buf.start(), var, node->name());
 }
 
 
@@ -1580,7 +1593,9 @@
 
 
 void AstPrinter::VisitYield(Yield* node) {
-  IndentedScope indent(this, "YIELD", node->position());
+  EmbeddedVector<char, 128> buf;
+  SNPrintF(buf, "YIELD (kind %d)", node->yield_kind());
+  IndentedScope indent(this, buf.start(), node->position());
   Visit(node->expression());
 }
 
@@ -1608,7 +1623,9 @@
 
 void AstPrinter::VisitCall(Call* node) {
   EmbeddedVector<char, 128> buf;
-  FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+  const char* name =
+      node->tail_call_mode() == TailCallMode::kAllow ? "TAIL CALL" : "CALL";
+  FormatSlotNode(&buf, node, name, node->CallFeedbackICSlot());
   IndentedScope indent(this, buf.start());
 
   Visit(node->expression());
@@ -1686,8 +1703,7 @@
 }
 
 
-void AstPrinter::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstPrinter::VisitRewritableExpression(RewritableExpression* node) {
   Visit(node->expression());
 }
 
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index 7e4dcdc..0186203 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -104,6 +104,9 @@
 
   const char* PrintProgram(FunctionLiteral* program);
 
+  // Print a node to stdout.
+  static void PrintOut(Isolate* isolate, AstNode* node);
+
   // Individual nodes
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
diff --git a/src/ast/scopeinfo.cc b/src/ast/scopeinfo.cc
index 668879f..4ffc020 100644
--- a/src/ast/scopeinfo.cc
+++ b/src/ast/scopeinfo.cc
@@ -19,16 +19,12 @@
   ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
   ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
   ZoneList<Variable*> context_globals(scope->ContextGlobalCount(), zone);
-  ZoneList<Variable*> strong_mode_free_variables(0, zone);
 
   scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
-                                      &context_globals,
-                                      &strong_mode_free_variables);
+                                      &context_globals);
   const int stack_local_count = stack_locals.length();
   const int context_local_count = context_locals.length();
   const int context_global_count = context_globals.length();
-  const int strong_mode_free_variable_count =
-      strong_mode_free_variables.length();
   // Make sure we allocate the correct amount.
   DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
   DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
@@ -77,7 +73,6 @@
   const int length = kVariablePartIndex + parameter_count +
                      (1 + stack_local_count) + 2 * context_local_count +
                      2 * context_global_count +
-                     3 * strong_mode_free_variable_count +
                      (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
 
   Factory* factory = isolate->factory();
@@ -104,7 +99,6 @@
   scope_info->SetStackLocalCount(stack_local_count);
   scope_info->SetContextLocalCount(context_local_count);
   scope_info->SetContextGlobalCount(context_global_count);
-  scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
 
   int index = kVariablePartIndex;
   // Add parameters.
@@ -173,25 +167,6 @@
     scope_info->set(index++, Smi::FromInt(value));
   }
 
-  DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
-  for (int i = 0; i < strong_mode_free_variable_count; ++i) {
-    scope_info->set(index++, *strong_mode_free_variables[i]->name());
-  }
-
-  DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
-  for (int i = 0; i < strong_mode_free_variable_count; ++i) {
-    // Unfortunately, the source code positions are stored as int even though
-    // int32_t would be enough (given the maximum source code length).
-    Handle<Object> start_position = factory->NewNumberFromInt(
-        static_cast<int32_t>(strong_mode_free_variables[i]
-                                 ->strong_mode_reference_start_position()));
-    scope_info->set(index++, *start_position);
-    Handle<Object> end_position = factory->NewNumberFromInt(
-        static_cast<int32_t>(strong_mode_free_variables[i]
-                                 ->strong_mode_reference_end_position()));
-    scope_info->set(index++, *end_position);
-  }
-
   // If the receiver is allocated, add its index.
   DCHECK(index == scope_info->ReceiverEntryIndex());
   if (has_receiver) {
@@ -226,7 +201,6 @@
   const int stack_local_count = 0;
   const int context_local_count = 1;
   const int context_global_count = 0;
-  const int strong_mode_free_variable_count = 0;
   const bool has_simple_parameters = true;
   const VariableAllocationInfo receiver_info = CONTEXT;
   const VariableAllocationInfo function_name_info = NONE;
@@ -237,7 +211,6 @@
   const int length = kVariablePartIndex + parameter_count +
                      (1 + stack_local_count) + 2 * context_local_count +
                      2 * context_global_count +
-                     3 * strong_mode_free_variable_count +
                      (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
 
   Factory* factory = isolate->factory();
@@ -259,7 +232,6 @@
   scope_info->SetStackLocalCount(stack_local_count);
   scope_info->SetContextLocalCount(context_local_count);
   scope_info->SetContextGlobalCount(context_global_count);
-  scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
 
   int index = kVariablePartIndex;
   const int first_slot_index = 0;
@@ -276,9 +248,6 @@
                          ContextLocalMaybeAssignedFlag::encode(kNotAssigned);
   scope_info->set(index++, Smi::FromInt(value));
 
-  DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
-  DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
-
   // And here we record that this scopeinfo binds a receiver.
   DCHECK(index == scope_info->ReceiverEntryIndex());
   const int receiver_index = Context::MIN_CONTEXT_SLOTS + 0;
@@ -482,35 +451,6 @@
 }
 
 
-String* ScopeInfo::StrongModeFreeVariableName(int var) {
-  DCHECK(0 <= var && var < StrongModeFreeVariableCount());
-  int info_index = StrongModeFreeVariableNameEntriesIndex() + var;
-  return String::cast(get(info_index));
-}
-
-
-int ScopeInfo::StrongModeFreeVariableStartPosition(int var) {
-  DCHECK(0 <= var && var < StrongModeFreeVariableCount());
-  int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2;
-  int32_t value = 0;
-  bool ok = get(info_index)->ToInt32(&value);
-  USE(ok);
-  DCHECK(ok);
-  return value;
-}
-
-
-int ScopeInfo::StrongModeFreeVariableEndPosition(int var) {
-  DCHECK(0 <= var && var < StrongModeFreeVariableCount());
-  int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2 + 1;
-  int32_t value = 0;
-  bool ok = get(info_index)->ToInt32(&value);
-  USE(ok);
-  DCHECK(ok);
-  return value;
-}
-
-
 int ScopeInfo::StackSlotIndex(String* name) {
   DCHECK(name->IsInternalizedString());
   if (length() > 0) {
@@ -691,20 +631,8 @@
 }
 
 
-int ScopeInfo::StrongModeFreeVariableNameEntriesIndex() {
-  return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
-}
-
-
-int ScopeInfo::StrongModeFreeVariablePositionEntriesIndex() {
-  return StrongModeFreeVariableNameEntriesIndex() +
-         StrongModeFreeVariableCount();
-}
-
-
 int ScopeInfo::ReceiverEntryIndex() {
-  return StrongModeFreeVariablePositionEntriesIndex() +
-         2 * StrongModeFreeVariableCount();
+  return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
 }
 
 
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index c2b05b7..7c87ce3 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -27,12 +27,10 @@
       zone_(zone) {}
 VariableMap::~VariableMap() {}
 
-
 Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
                                VariableMode mode, Variable::Kind kind,
                                InitializationFlag initialization_flag,
-                               MaybeAssignedFlag maybe_assigned_flag,
-                               int declaration_group_start) {
+                               MaybeAssignedFlag maybe_assigned_flag) {
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
   // FIXME(marja): fix the type of Lookup.
@@ -42,14 +40,8 @@
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     DCHECK(p->key == name);
-    if (kind == Variable::CLASS) {
-      p->value = new (zone())
-          ClassVariable(scope, name, mode, initialization_flag,
-                        maybe_assigned_flag, declaration_group_start);
-    } else {
-      p->value = new (zone()) Variable(
-          scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
-    }
+    p->value = new (zone()) Variable(scope, name, mode, kind,
+                                     initialization_flag, maybe_assigned_flag);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
@@ -103,8 +95,7 @@
       sloppy_block_function_map_(zone),
       already_resolved_(false),
       ast_value_factory_(ast_value_factory),
-      zone_(zone),
-      class_declaration_group_start_(-1) {
+      zone_(zone) {
   SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null(),
               function_kind);
   // The outermost scope must be a script scope.
@@ -112,7 +103,6 @@
   DCHECK(!HasIllegalRedeclaration());
 }
 
-
 Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
              Handle<ScopeInfo> scope_info, AstValueFactory* value_factory)
     : inner_scopes_(4, zone),
@@ -125,8 +115,7 @@
       sloppy_block_function_map_(zone),
       already_resolved_(true),
       ast_value_factory_(value_factory),
-      zone_(zone),
-      class_declaration_group_start_(-1) {
+      zone_(zone) {
   SetDefaults(scope_type, NULL, scope_info);
   if (!scope_info.is_null()) {
     num_heap_slots_ = scope_info_->ContextLength();
@@ -137,7 +126,6 @@
   AddInnerScope(inner_scope);
 }
 
-
 Scope::Scope(Zone* zone, Scope* inner_scope,
              const AstRawString* catch_variable_name,
              AstValueFactory* value_factory)
@@ -151,8 +139,7 @@
       sloppy_block_function_map_(zone),
       already_resolved_(true),
       ast_value_factory_(value_factory),
-      zone_(zone),
-      class_declaration_group_start_(-1) {
+      zone_(zone) {
   SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
   AddInnerScope(inner_scope);
   ++num_var_or_const_;
@@ -528,19 +515,17 @@
   return var;
 }
 
-
 Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
                               InitializationFlag init_flag, Variable::Kind kind,
-                              MaybeAssignedFlag maybe_assigned_flag,
-                              int declaration_group_start) {
+                              MaybeAssignedFlag maybe_assigned_flag) {
   DCHECK(!already_resolved());
   // This function handles VAR, LET, and CONST modes.  DYNAMIC variables are
-  // introduces during variable allocation, and TEMPORARY variables are
+  // introduced during variable allocation, and TEMPORARY variables are
   // allocated via NewTemporary().
   DCHECK(IsDeclaredVariableMode(mode));
   ++num_var_or_const_;
   return variables_.Declare(this, name, mode, kind, init_flag,
-                            maybe_assigned_flag, declaration_group_start);
+                            maybe_assigned_flag);
 }
 
 
@@ -660,11 +645,9 @@
   int order_;
 };
 
-
-void Scope::CollectStackAndContextLocals(
-    ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
-    ZoneList<Variable*>* context_globals,
-    ZoneList<Variable*>* strong_mode_free_variables) {
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                         ZoneList<Variable*>* context_locals,
+                                         ZoneList<Variable*>* context_globals) {
   DCHECK(stack_locals != NULL);
   DCHECK(context_locals != NULL);
   DCHECK(context_globals != NULL);
@@ -691,11 +674,6 @@
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
-    if (strong_mode_free_variables && var->has_strong_mode_reference() &&
-        var->mode() == DYNAMIC_GLOBAL) {
-      strong_mode_free_variables->Add(var, zone());
-    }
-
     if (var->is_used()) {
       vars.Add(VarAndOrder(var, p->order), zone());
     }
@@ -1017,9 +995,7 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
-  if (is_strong(language_mode())) {
-    Indent(n1, "// strong mode scope\n");
-  } else if (is_strict(language_mode())) {
+  if (is_strict(language_mode())) {
     Indent(n1, "// strict mode scope\n");
   }
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
@@ -1204,10 +1180,6 @@
 
   switch (binding_kind) {
     case BOUND:
-      // We found a variable binding.
-      if (is_strong(language_mode())) {
-        if (!CheckStrongModeDeclaration(proxy, var)) return false;
-      }
       break;
 
     case BOUND_EVAL_SHADOWED:
@@ -1245,126 +1217,12 @@
   DCHECK(var != NULL);
   if (proxy->is_assigned()) var->set_maybe_assigned();
 
-  if (is_strong(language_mode())) {
-    // Record that the variable is referred to from strong mode. Also, record
-    // the position.
-    var->RecordStrongModeReference(proxy->position(), proxy->end_position());
-  }
-
   proxy->BindTo(var);
 
   return true;
 }
 
 
-bool Scope::CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var) {
-  // Check for declaration-after use (for variables) in strong mode. Note that
-  // we can only do this in the case where we have seen the declaration. And we
-  // always allow referencing functions (for now).
-
-  // This might happen during lazy compilation; we don't keep track of
-  // initializer positions for variables stored in ScopeInfo, so we cannot check
-  // bindings against them. TODO(marja, rossberg): remove this hack.
-  if (var->initializer_position() == RelocInfo::kNoPosition) return true;
-
-  // Allow referencing the class name from methods of that class, even though
-  // the initializer position for class names is only after the body.
-  Scope* scope = this;
-  while (scope) {
-    if (scope->ClassVariableForMethod() == var) return true;
-    scope = scope->outer_scope();
-  }
-
-  // Allow references from methods to classes declared later, if we detect no
-  // problematic dependency cycles. Note that we can be inside multiple methods
-  // at the same time, and it's enough if we find one where the reference is
-  // allowed.
-  if (var->is_class() &&
-      var->AsClassVariable()->declaration_group_start() >= 0) {
-    for (scope = this; scope && scope != var->scope();
-         scope = scope->outer_scope()) {
-      ClassVariable* class_var = scope->ClassVariableForMethod();
-      // A method is referring to some other class, possibly declared
-      // later. Referring to a class declared earlier is always OK and covered
-      // by the code outside this if. Here we only need to allow special cases
-      // for referring to a class which is declared later.
-
-      // Referring to a class C declared later is OK under the following
-      // circumstances:
-
-      // 1. The class declarations are in a consecutive group with no other
-      // declarations or statements in between, and
-
-      // 2. There is no dependency cycle where the first edge is an
-      // initialization time dependency (computed property name or extends
-      // clause) from C to something that depends on this class directly or
-      // transitively.
-      if (class_var &&
-          class_var->declaration_group_start() ==
-              var->AsClassVariable()->declaration_group_start()) {
-        return true;
-      }
-
-      // TODO(marja,rossberg): implement the dependency cycle detection. Here we
-      // undershoot the target and allow referring to any class in the same
-      // consecutive declaration group.
-
-      // The cycle detection can work roughly like this: 1) detect init-time
-      // references here (they are free variables which are inside the class
-      // scope but not inside a method scope - no parser changes needed to
-      // detect them) 2) if we encounter an init-time reference here, allow it,
-      // but record it for a later dependency cycle check 3) also record
-      // non-init-time references here 4) after scope analysis is done, analyse
-      // the dependency cycles: an illegal cycle is one starting with an
-      // init-time reference and leading back to the starting point with either
-      // non-init-time and init-time references.
-    }
-  }
-
-  // If both the use and the declaration are inside an eval scope (possibly
-  // indirectly), or one of them is, we need to check whether they are inside
-  // the same eval scope or different ones.
-
-  // TODO(marja,rossberg): Detect errors across different evals (depends on the
-  // future of eval in strong mode).
-  const Scope* eval_for_use = NearestOuterEvalScope();
-  const Scope* eval_for_declaration = var->scope()->NearestOuterEvalScope();
-
-  if (proxy->position() != RelocInfo::kNoPosition &&
-      proxy->position() < var->initializer_position() && !var->is_function() &&
-      eval_for_use == eval_for_declaration) {
-    DCHECK(proxy->end_position() != RelocInfo::kNoPosition);
-    ReportMessage(proxy->position(), proxy->end_position(),
-                  MessageTemplate::kStrongUseBeforeDeclaration,
-                  proxy->raw_name());
-    return false;
-  }
-  return true;
-}
-
-
-ClassVariable* Scope::ClassVariableForMethod() const {
-  // TODO(marja, rossberg): This fails to find a class variable in the following
-  // cases:
-  // let A = class { ... }
-  // It needs to be investigated whether this causes any practical problems.
-  if (!is_function_scope()) return nullptr;
-  if (IsInObjectLiteral(function_kind_)) return nullptr;
-  if (!IsConciseMethod(function_kind_) && !IsClassConstructor(function_kind_) &&
-      !IsAccessorFunction(function_kind_)) {
-    return nullptr;
-  }
-  DCHECK_NOT_NULL(outer_scope_);
-  // The class scope contains at most one variable, the class name.
-  DCHECK(outer_scope_->variables_.occupancy() <= 1);
-  if (outer_scope_->variables_.occupancy() == 0) return nullptr;
-  VariableMap::Entry* p = outer_scope_->variables_.Start();
-  Variable* var = reinterpret_cast<Variable*>(p->value);
-  if (!var->is_class()) return nullptr;
-  return var->AsClassVariable();
-}
-
-
 bool Scope::ResolveVariablesRecursively(ParseInfo* info,
                                         AstNodeFactory* factory) {
   DCHECK(info->script_scope()->is_script_scope());
@@ -1646,7 +1504,7 @@
   }
 
   // If scope is already resolved, we still need to allocate
-  // variables in inner scopes which might not had been resolved yet.
+  // variables in inner scopes which might not have been resolved yet.
   if (already_resolved()) return;
   // The number of slots required for variables.
   num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index 6c261f6..76f761d 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -24,8 +24,7 @@
 
   Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
                     Variable::Kind kind, InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-                    int declaration_group_start = -1);
+                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   Variable* Lookup(const AstRawString* name);
 
@@ -163,8 +162,7 @@
   // declared before, the previously declared variable is returned.
   Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
                          InitializationFlag init_flag, Variable::Kind kind,
-                         MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-                         int declaration_group_start = -1);
+                         MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   // Declare an implicit global variable in this scope which must be a
   // script scope.  The variable was introduced (possibly from an inner
@@ -377,12 +375,6 @@
              IsClassConstructor(function_kind())));
   }
 
-  const Scope* NearestOuterEvalScope() const {
-    if (is_eval_scope()) return this;
-    if (outer_scope() == nullptr) return nullptr;
-    return outer_scope()->NearestOuterEvalScope();
-  }
-
   // ---------------------------------------------------------------------------
   // Accessors.
 
@@ -428,7 +420,24 @@
   // Returns the default function arity excluding default or rest parameters.
   int default_function_length() const { return arity_; }
 
-  int num_parameters() const { return params_.length(); }
+  // Returns the number of formal parameters, up to but not including the
+  // rest parameter index (if the function has rest parameters), i.e. it
+  // returns 2 for
+  //
+  //   function foo(a, b) { ... }
+  //
+  // and
+  //
+  //   function foo(a, b, ...c) { ... }
+  //
+  // but for
+  //
+  //   function foo(a, b, c = 1) { ... }
+  //
+  // we return 3 here.
+  int num_parameters() const {
+    return has_rest_parameter() ? params_.length() - 1 : params_.length();
+  }
 
   // A function can have at most one rest parameter. Returns Variable* or NULL.
   Variable* rest_parameter(int* index) const {
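Note: the num_parameters() comment above describes the counting rule; the sketch below shows it in isolation, under the same assumption the accessor makes, namely that a rest parameter, when present, occupies the last slot of params_. ScopeSketch is a made-up stand-in, not the real Scope:

  #include <cassert>
  #include <string>
  #include <vector>

  struct ScopeSketch {
    std::vector<std::string> params_;
    bool has_rest_parameter_ = false;

    int num_parameters() const {
      // The rest parameter sits last in params_, so it is dropped from the
      // count; default parameters are ordinary slots and stay counted.
      int length = static_cast<int>(params_.size());
      return has_rest_parameter_ ? length - 1 : length;
    }
  };

  int main() {
    assert((ScopeSketch{{"a", "b"}, false}).num_parameters() == 2);
    assert((ScopeSketch{{"a", "b", "c"}, true}).num_parameters() == 2);   // ...c
    assert((ScopeSketch{{"a", "b", "c"}, false}).num_parameters() == 3);  // c = 1
    return 0;
  }
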
@@ -486,25 +495,15 @@
   // The ModuleDescriptor for this scope; only for module scopes.
   ModuleDescriptor* module() const { return module_descriptor_; }
 
-
-  void set_class_declaration_group_start(int position) {
-    class_declaration_group_start_ = position;
-  }
-
-  int class_declaration_group_start() const {
-    return class_declaration_group_start_;
-  }
-
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
   // Collect stack and context allocated local variables in this scope. Note
   // that the function variable - if present - is not collected and should be
   // handled separately.
-  void CollectStackAndContextLocals(
-      ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
-      ZoneList<Variable*>* context_globals,
-      ZoneList<Variable*>* strong_mode_free_variables = nullptr);
+  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+                                    ZoneList<Variable*>* context_locals,
+                                    ZoneList<Variable*>* context_globals);
 
   // Current number of var or const locals.
   int num_var_or_const() { return num_var_or_const_; }
@@ -767,12 +766,6 @@
   MUST_USE_RESULT
   bool ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
 
-  bool CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var);
-
-  // If this scope is a method scope of a class, return the corresponding
-  // class variable, otherwise nullptr.
-  ClassVariable* ClassVariableForMethod() const;
-
   // Scope analysis.
   void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
   bool HasTrivialContext() const;
@@ -837,10 +830,6 @@
   Zone* zone_;
 
   PendingCompilationErrorHandler pending_error_handler_;
-
-  // For tracking which classes are declared consecutively. Needed for strong
-  // mode.
-  int class_declaration_group_start_;
 };
 
 }  // namespace internal
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index 8e00782..7b9a5d2 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -40,9 +40,6 @@
       location_(VariableLocation::UNALLOCATED),
       index_(-1),
       initializer_position_(RelocInfo::kNoPosition),
-      has_strong_mode_reference_(false),
-      strong_mode_reference_start_position_(RelocInfo::kNoPosition),
-      strong_mode_reference_end_position_(RelocInfo::kNoPosition),
       local_if_not_shadowed_(NULL),
       is_from_eval_(false),
       force_context_allocation_(false),
diff --git a/src/ast/variables.h b/src/ast/variables.h
index ca5d1cd..b8bb07e 100644
--- a/src/ast/variables.h
+++ b/src/ast/variables.h
@@ -15,12 +15,9 @@
 // variables. Variables themselves are never directly referred to from the AST,
 // they are maintained by scopes, and referred to from VariableProxies and Slots
 // after binding and variable allocation.
-
-class ClassVariable;
-
 class Variable: public ZoneObject {
  public:
-  enum Kind { NORMAL, FUNCTION, CLASS, THIS, ARGUMENTS };
+  enum Kind { NORMAL, FUNCTION, THIS, ARGUMENTS };
 
   Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
            InitializationFlag initialization_flag,
@@ -84,7 +81,6 @@
   }
 
   bool is_function() const { return kind_ == FUNCTION; }
-  bool is_class() const { return kind_ == CLASS; }
   bool is_this() const { return kind_ == THIS; }
   bool is_arguments() const { return kind_ == ARGUMENTS; }
 
@@ -98,11 +94,6 @@
     return is_this() || *name() == *isolate->factory()->this_string();
   }
 
-  ClassVariable* AsClassVariable() {
-    DCHECK(is_class());
-    return reinterpret_cast<ClassVariable*>(this);
-  }
-
   // True if the variable is named eval and not known to be shadowed.
   bool is_possibly_eval(Isolate* isolate) const {
     return IsVariable(isolate->factory()->eval_string());
@@ -132,24 +123,6 @@
 
   static int CompareIndex(Variable* const* v, Variable* const* w);
 
-  void RecordStrongModeReference(int start_position, int end_position) {
-    // Record the earliest reference to the variable. Used in error messages for
-    // strong mode references to undeclared variables.
-    if (has_strong_mode_reference_ &&
-        strong_mode_reference_start_position_ < start_position)
-      return;
-    has_strong_mode_reference_ = true;
-    strong_mode_reference_start_position_ = start_position;
-    strong_mode_reference_end_position_ = end_position;
-  }
-
-  bool has_strong_mode_reference() const { return has_strong_mode_reference_; }
-  int strong_mode_reference_start_position() const {
-    return strong_mode_reference_start_position_;
-  }
-  int strong_mode_reference_end_position() const {
-    return strong_mode_reference_end_position_;
-  }
   PropertyAttributes DeclarationPropertyAttributes() const {
     int property_attributes = NONE;
     if (IsImmutableVariableMode(mode_)) {
@@ -169,11 +142,6 @@
   VariableLocation location_;
   int index_;
   int initializer_position_;
-  // Tracks whether the variable is bound to a VariableProxy which is in strong
-  // mode, and if yes, the source location of the reference.
-  bool has_strong_mode_reference_;
-  int strong_mode_reference_start_position_;
-  int strong_mode_reference_end_position_;
 
   // If this field is set, this variable references the stored locally bound
   // variable, but it might be shadowed by variable bindings introduced by
@@ -190,28 +158,6 @@
   InitializationFlag initialization_flag_;
   MaybeAssignedFlag maybe_assigned_;
 };
-
-class ClassVariable : public Variable {
- public:
-  ClassVariable(Scope* scope, const AstRawString* name, VariableMode mode,
-                InitializationFlag initialization_flag,
-                MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-                int declaration_group_start = -1)
-      : Variable(scope, name, mode, Variable::CLASS, initialization_flag,
-                 maybe_assigned_flag),
-        declaration_group_start_(declaration_group_start) {}
-
-  int declaration_group_start() const { return declaration_group_start_; }
-  void set_declaration_group_start(int declaration_group_start) {
-    declaration_group_start_ = declaration_group_start;
-  }
-
- private:
-  // For classes we keep track of consecutive groups of declarations. They are
-  // needed for strong mode scoping checks. TODO(marja, rossberg): Implement
-  // checks for functions too.
-  int declaration_group_start_;
-};
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index 83898d1..272b6a4 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -78,8 +78,6 @@
   V(kExportDeclaration, "Export declaration")                                  \
   V(kExternalStringExpectedButNotFound,                                        \
     "External string expected, but not found")                                 \
-  V(kForInStatementOptimizationIsDisabled,                                     \
-    "ForInStatement optimization is disabled")                                 \
   V(kForInStatementWithNonLocalEachVariable,                                   \
     "ForInStatement with non-local each variable")                             \
   V(kForOfStatement, "ForOfStatement")                                         \
@@ -99,8 +97,6 @@
   V(kImportDeclaration, "Import declaration")                                  \
   V(kIndexIsNegative, "Index is negative")                                     \
   V(kIndexIsTooLarge, "Index is too large")                                    \
-  V(kInlinedRuntimeFunctionFastOneByteArrayJoin,                               \
-    "Inlined runtime function: FastOneByteArrayJoin")                          \
   V(kInliningBailedOut, "Inlining bailed out")                                 \
   V(kInputGPRIsExpectedToHaveUpper32Cleared,                                   \
     "Input GPR is expected to have upper32 cleared")                           \
@@ -131,8 +127,6 @@
   V(kNativeFunctionLiteral, "Native function literal")                         \
   V(kNeedSmiLiteral, "Need a Smi literal here")                                \
   V(kNoCasesLeft, "No cases left")                                             \
-  V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin,                              \
-    "No empty arrays here in EmitFastOneByteArrayJoin")                        \
   V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const")   \
   V(kNonSmiIndex, "Non-smi index")                                             \
   V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal")                  \
@@ -150,6 +144,7 @@
     "Operand is a smi and not a bound function")                               \
   V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function")      \
   V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name")              \
+  V(kOperandIsASmiAndNotAReceiver, "Operand is a smi and not a receiver")      \
   V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string")          \
   V(kOperandIsASmi, "Operand is a smi")                                        \
   V(kOperandIsNotADate, "Operand is not a date")                               \
@@ -157,6 +152,7 @@
   V(kOperandIsNotAFunction, "Operand is not a function")                       \
   V(kOperandIsNotAName, "Operand is not a name")                               \
   V(kOperandIsNotANumber, "Operand is not a number")                           \
+  V(kOperandIsNotAReceiver, "Operand is not a receiver")                       \
   V(kOperandIsNotASmi, "Operand is not a smi")                                 \
   V(kOperandIsNotAString, "Operand is not a string")                           \
   V(kOperandIsNotSmi, "Operand is not smi")                                    \
@@ -183,10 +179,10 @@
     "Sloppy function expects JSReceiver as receiver.")                         \
   V(kSmiAdditionOverflow, "Smi addition overflow")                             \
   V(kSmiSubtractionOverflow, "Smi subtraction overflow")                       \
-  V(kSpread, "Spread in array literal")                                        \
   V(kStackAccessBelowStackPointer, "Stack access below stack pointer")         \
   V(kStackFrameTypesMustMatch, "Stack frame types must match")                 \
   V(kSuperReference, "Super reference")                                        \
+  V(kTailCall, "Tail call")                                                    \
   V(kTheCurrentStackPointerIsBelowCsp,                                         \
     "The current stack pointer is below csp")                                  \
   V(kTheSourceAndDestinationAreTheSame,                                        \
@@ -236,6 +232,7 @@
     "Unexpected number of pre-allocated property fields")                      \
   V(kUnexpectedFPCRMode, "Unexpected FPCR mode.")                              \
   V(kUnexpectedSmi, "Unexpected smi value")                                    \
+  V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen")   \
   V(kUnexpectedStackPointer, "The stack pointer is not the expected value")    \
   V(kUnexpectedStringType, "Unexpected string type")                           \
   V(kUnexpectedTypeForRegExpDataFixedArrayExpected,                            \
@@ -253,6 +250,8 @@
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
   V(kUnsupportedPhiUseOfConstVariable,                                         \
     "Unsupported phi use of const variable")                                   \
+  V(kUnexpectedReturnFromBytecodeHandler,                                      \
+    "Unexpectedly returned from a bytecode handler")                           \
   V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw")          \
   V(kUnsupportedSwitchStatement, "Unsupported switch statement")               \
   V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")               \
@@ -267,7 +266,6 @@
     "Should not directly enter OSR-compiled function")                         \
   V(kYield, "Yield")
 
-
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
 enum BailoutReason {
   ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
diff --git a/src/base.isolate b/src/base.isolate
index 8422ec7..b51de01 100644
--- a/src/base.isolate
+++ b/src/base.isolate
@@ -22,6 +22,13 @@
         ],
       },
     }],
+    ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and v8_separate_ignition_snapshot==1', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
+        ],
+      },
+    }],
     ['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
       'variables': {
         'files': [
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 3e628fe..ea33e48 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -157,6 +157,8 @@
 #include "src/base/atomicops_internals_mips_gcc.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
 #include "src/base/atomicops_internals_mips64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_S390
+#include "src/base/atomicops_internals_s390_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
diff --git a/src/base/atomicops_internals_s390_gcc.h b/src/base/atomicops_internals_s390_gcc.h
new file mode 100644
index 0000000..6e34f30
--- /dev/null
+++ b/src/base/atomicops_internals_s390_gcc.h
@@ -0,0 +1,152 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return __sync_add_and_fetch(ptr, increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() { __sync_synchronize(); }
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef V8_TARGET_ARCH_S390X
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return __sync_add_and_fetch(ptr, increment);
+}
+
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
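
Note: every exchange and increment in the new file reduces to the GCC/Clang __sync builtins; the only non-trivial piece is the compare-and-swap retry loop used by NoBarrier_AtomicExchange. A stand-alone sketch of that loop follows (plain C++, not s390-specific; it assumes a GCC-style toolchain that provides the __sync builtins):

  #include <cassert>

  typedef int Atomic32;

  Atomic32 AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) {
    Atomic32 old_value;
    do {
      old_value = *ptr;  // snapshot; another thread may change *ptr after this
      // The swap only succeeds if *ptr still equals the snapshot; otherwise
      // retry with a fresh snapshot.
    } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
    return old_value;
  }

  int main() {
    volatile Atomic32 cell = 41;
    Atomic32 previous = AtomicExchange(&cell, 42);
    assert(previous == 41);
    assert(cell == 42);
    return 0;
  }
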
diff --git a/src/base/bits.h b/src/base/bits.h
index 4ba3c47..0e76624 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -92,6 +92,20 @@
 }
 
 
+// ReverseBits(value) returns |value| in reverse bit order.
+template <typename T>
+T ReverseBits(T value) {
+  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+         (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
+
+
 // CountTrailingZeros32(value) returns the number of zero bits preceding the
 // least significant 1 bit in |value| if |value| is non-zero, otherwise it
 // returns 32.
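
Note: a usage sketch for the new ReverseBits helper; the template is copied locally so the example compiles on its own:

  #include <cassert>
  #include <cstdint>

  template <typename T>
  T ReverseBits(T value) {
    T result = 0;
    for (unsigned i = 0; i < sizeof(value) * 8; i++) {
      result = (result << 1) | (value & 1);  // append the lowest bit of value
      value >>= 1;
    }
    return result;
  }

  int main() {
    // The lowest bit becomes the highest bit of the same-width type.
    assert(ReverseBits<uint8_t>(0x01) == 0x80);
    assert(ReverseBits<uint32_t>(0x00000001u) == 0x80000000u);
    // A palindromic bit pattern maps to itself.
    assert(ReverseBits<uint8_t>(0x81) == 0x81);
    return 0;
  }
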
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index 692494a..777f379 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -312,6 +312,8 @@
       architecture_(0),
       variant_(-1),
       part_(0),
+      icache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
+      dcache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
       has_fpu_(false),
       has_cmov_(false),
       has_sahf_(false),
@@ -644,9 +646,16 @@
       if (n == 0 || entry.a_type == AT_NULL) {
         break;
       }
-      if (entry.a_type == AT_PLATFORM) {
-        auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
-        break;
+      switch (entry.a_type) {
+        case AT_PLATFORM:
+          auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+          break;
+        case AT_ICACHEBSIZE:
+          icache_line_size_ = entry.a_un.a_val;
+          break;
+        case AT_DCACHEBSIZE:
+          dcache_line_size_ = entry.a_un.a_val;
+          break;
       }
     }
     fclose(fp);
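
Note: the loop above reads AT_ICACHEBSIZE / AT_DCACHEBSIZE out of /proc/self/auxv. On glibc 2.16+ the same aux vector entries can be queried directly with getauxval(); the kernel only populates them on some architectures (notably PPC), so a result of 0 has to be treated as "unknown", matching UNKNOWN_CACHE_LINE_SIZE. A minimal sketch, assuming a Linux/glibc toolchain whose elf.h defines these constants:

  #include <elf.h>       // AT_ICACHEBSIZE, AT_DCACHEBSIZE
  #include <sys/auxv.h>  // getauxval (glibc >= 2.16)
  #include <cstdio>

  int main() {
    // getauxval returns 0 when the aux vector has no such entry.
    unsigned long icache = getauxval(AT_ICACHEBSIZE);
    unsigned long dcache = getauxval(AT_DCACHEBSIZE);
    std::printf("icache line: %lu bytes, dcache line: %lu bytes\n",
                icache, dcache);
    return 0;
  }
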
diff --git a/src/base/cpu.h b/src/base/cpu.h
index ca108fa..3778d27 100644
--- a/src/base/cpu.h
+++ b/src/base/cpu.h
@@ -75,6 +75,9 @@
 
   // General features
   bool has_fpu() const { return has_fpu_; }
+  int icache_line_size() const { return icache_line_size_; }
+  int dcache_line_size() const { return dcache_line_size_; }
+  static const int UNKNOWN_CACHE_LINE_SIZE = 0;
 
   // x86 features
   bool has_cmov() const { return has_cmov_; }
@@ -118,6 +121,8 @@
   int architecture_;
   int variant_;
   int part_;
+  int icache_line_size_;
+  int dcache_line_size_;
   bool has_fpu_;
   bool has_cmov_;
   bool has_sahf_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f68a12a..252c51c 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -134,7 +134,8 @@
  public:
   Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
           v8::Local<v8::ObjectTemplate> global_proxy_template,
-          v8::ExtensionConfiguration* extensions, ContextType context_type);
+          v8::ExtensionConfiguration* extensions,
+          GlobalContextType context_type);
   ~Genesis() { }
 
   Isolate* isolate() const { return isolate_; }
@@ -187,10 +188,10 @@
   // New context initialization.  Used for creating a context from scratch.
   void InitializeGlobal(Handle<JSGlobalObject> global_object,
                         Handle<JSFunction> empty_function,
-                        ContextType context_type);
+                        GlobalContextType context_type);
   void InitializeExperimentalGlobal();
   // Depending on the situation, expose and/or get rid of the utils object.
-  void ConfigureUtilsObject(ContextType context_type);
+  void ConfigureUtilsObject(GlobalContextType context_type);
 
 #define DECLARE_FEATURE_INITIALIZATION(id, descr) \
   void InitializeGlobal_##id();
@@ -206,7 +207,7 @@
   Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
                                           const char* name,
                                           ElementsKind elements_kind);
-  bool InstallNatives(ContextType context_type);
+  bool InstallNatives(GlobalContextType context_type);
 
   void InstallTypedArray(const char* name, ElementsKind elements_kind,
                          Handle<JSFunction>* fun);
@@ -318,11 +319,10 @@
   v->Synchronize(VisitorSynchronization::kExtensions);
 }
 
-
 Handle<Context> Bootstrapper::CreateEnvironment(
     MaybeHandle<JSGlobalProxy> maybe_global_proxy,
     v8::Local<v8::ObjectTemplate> global_proxy_template,
-    v8::ExtensionConfiguration* extensions, ContextType context_type) {
+    v8::ExtensionConfiguration* extensions, GlobalContextType context_type) {
   HandleScope scope(isolate_);
   Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
                   extensions, context_type);
@@ -483,7 +483,7 @@
 Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   SetFunctionInstanceDescriptor(map, function_mode);
-  if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
+  map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
   map->set_is_callable();
   return map;
 }
@@ -715,7 +715,7 @@
     FunctionMode function_mode, Handle<JSFunction> empty_function) {
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   SetStrictFunctionInstanceDescriptor(map, function_mode);
-  if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
+  map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
   map->set_is_callable();
   Map::SetPrototype(map, empty_function);
   return map;
@@ -726,7 +726,7 @@
     Handle<JSFunction> empty_function, bool is_constructor) {
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   SetStrongFunctionInstanceDescriptor(map);
-  if (is_constructor) map->set_is_constructor();
+  map->set_is_constructor(is_constructor);
   Map::SetPrototype(map, empty_function);
   map->set_is_callable();
   map->set_is_extensible(is_constructor);
@@ -789,6 +789,7 @@
   // Generator functions do not have "caller" or "arguments" accessors.
   Handle<Map> sloppy_generator_function_map =
       Map::Copy(strict_function_map, "SloppyGeneratorFunction");
+  sloppy_generator_function_map->set_is_constructor(false);
   Map::SetPrototype(sloppy_generator_function_map,
                     generator_function_prototype);
   native_context()->set_sloppy_generator_function_map(
@@ -796,6 +797,7 @@
 
   Handle<Map> strict_generator_function_map =
       Map::Copy(strict_function_map, "StrictGeneratorFunction");
+  strict_generator_function_map->set_is_constructor(false);
   Map::SetPrototype(strict_generator_function_map,
                     generator_function_prototype);
   native_context()->set_strict_generator_function_map(
@@ -804,6 +806,7 @@
   Handle<Map> strong_function_map(native_context()->strong_function_map());
   Handle<Map> strong_generator_function_map =
       Map::Copy(strong_function_map, "StrongGeneratorFunction");
+  strong_generator_function_map->set_is_constructor(false);
   Map::SetPrototype(strong_generator_function_map,
                     generator_function_prototype);
   native_context()->set_strong_generator_function_map(
@@ -822,7 +825,7 @@
                              PropertyAttributes attributes,
                              Handle<AccessorPair> accessor_pair) {
   DescriptorArray* descriptors = map->instance_descriptors();
-  int idx = descriptors->SearchWithCache(*name, *map);
+  int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
   AccessorConstantDescriptor descriptor(name, accessor_pair, attributes);
   descriptors->Replace(idx, &descriptor);
 }
@@ -952,7 +955,6 @@
   }
 
   js_global_object_function->initial_map()->set_is_prototype_map(true);
-  js_global_object_function->initial_map()->set_is_hidden_prototype();
   js_global_object_function->initial_map()->set_dictionary_map(true);
   Handle<JSGlobalObject> global_object =
       factory()->NewJSGlobalObject(js_global_object_function);
@@ -973,10 +975,10 @@
         isolate(), global_constructor, factory()->the_hole_value(),
         ApiNatives::GlobalProxyType);
   }
-
   Handle<String> global_name = factory()->global_string();
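Note: the entry-index accessors in scopeinfo.cc form a chain in which each region of the serialized scope starts where the previous one ends, so dropping the strong mode free variable region just splices its links out of the chain, as the new ReceiverEntryIndex() above shows. A stand-alone sketch of the chaining; LayoutSketch and its counts are made-up example values, and the two context-global regions are folded into a single 2 * count term, so this is simplified relative to the real layout:

  #include <cassert>

  struct LayoutSketch {
    int parameter_count = 2;
    int stack_local_count = 3;
    int context_local_count = 1;
    int context_global_count = 0;
    int variable_part_index = 4;  // stand-in for kVariablePartIndex

    int ParameterEntriesIndex() const { return variable_part_index; }
    int StackLocalEntriesIndex() const {
      // +1 for the slot holding the first stack-local index.
      return ParameterEntriesIndex() + parameter_count + 1;
    }
    int ContextLocalNameEntriesIndex() const {
      return StackLocalEntriesIndex() + stack_local_count;
    }
    int ContextLocalInfoEntriesIndex() const {
      return ContextLocalNameEntriesIndex() + context_local_count;
    }
    int ReceiverEntryIndex() const {
      // The removed strong mode region used to add
      // 3 * strong_mode_free_variable_count between these two links.
      return ContextLocalInfoEntriesIndex() + context_local_count +
             2 * context_global_count;
    }
  };

  int main() {
    LayoutSketch layout;
    assert(layout.ReceiverEntryIndex() == 12);  // 4 + 2 + 1 + 3 + 1 + 1 + 0
    return 0;
  }
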
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
+  global_proxy_function->initial_map()->set_has_hidden_prototype(true);
 
   // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
   // Return the global proxy.
@@ -1063,7 +1065,7 @@
 // work in the snapshot case is done in HookUpGlobalObject.
 void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
                                Handle<JSFunction> empty_function,
-                               ContextType context_type) {
+                               GlobalContextType context_type) {
   // --- N a t i v e   C o n t e x t ---
   // Use the empty function as closure (no scope info).
   native_context()->set_closure(*empty_function);
@@ -1095,6 +1097,13 @@
     Handle<JSFunction> object_freeze = SimpleInstallFunction(
         object_function, "freeze", Builtins::kObjectFreeze, 1, false);
     native_context()->set_object_freeze(*object_freeze);
+    SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
+                          Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
+    SimpleInstallFunction(object_function, "getOwnPropertyNames",
+                          Builtins::kObjectGetOwnPropertyNames, 1, false);
+    SimpleInstallFunction(object_function, "getOwnPropertySymbols",
+                          Builtins::kObjectGetOwnPropertySymbols, 1, false);
+    SimpleInstallFunction(object_function, "is", Builtins::kObjectIs, 2, true);
     Handle<JSFunction> object_is_extensible =
         SimpleInstallFunction(object_function, "isExtensible",
                               Builtins::kObjectIsExtensible, 1, false);
@@ -1140,6 +1149,22 @@
     SimpleInstallFunction(prototype, factory->toString_string(),
                           Builtins::kFunctionPrototypeToString, 0, false);
 
+    // Install the @@hasInstance function.
+    Handle<JSFunction> has_instance = InstallFunction(
+        prototype, factory->has_instance_symbol(), JS_OBJECT_TYPE,
+        JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+        Builtins::kFunctionHasInstance,
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
+
+    // Set the expected parameters for @@hasInstance to 1; required by builtin.
+    has_instance->shared()->set_internal_formal_parameter_count(1);
+
+    // Set the length for the function to satisfy ECMA-262.
+    has_instance->shared()->set_length(1);
+
+    // Install in the native context
+    native_context()->set_ordinary_has_instance(*has_instance);
+
     // Install the "constructor" property on the %FunctionPrototype%.
     JSObject::AddProperty(prototype, factory->constructor_string(),
                           function_fun, DONT_ENUM);
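
Note: ordinary_has_instance, set above, backs the fallback path of instanceof: "x instanceof C" consults C[Symbol.hasInstance] first and only then walks the prototype chain. A stand-alone model of that dispatch, with plain pointers and made-up names (Obj, InstanceOf) standing in for the spec objects; this sketches the ES2015 semantics, not the V8 implementation:

  #include <cassert>
  #include <functional>

  struct Obj {
    Obj* proto = nullptr;                    // [[Prototype]]
    Obj* prototype_property = nullptr;       // C.prototype
    std::function<bool(Obj*)> has_instance;  // C[Symbol.hasInstance], if any
  };

  // OrdinaryHasInstance: walk obj's prototype chain looking for C.prototype.
  bool OrdinaryHasInstance(Obj* ctor, Obj* obj) {
    for (Obj* p = obj->proto; p != nullptr; p = p->proto) {
      if (p == ctor->prototype_property) return true;
    }
    return false;
  }

  bool InstanceOf(Obj* obj, Obj* ctor) {
    // A user-provided @@hasInstance wins over the ordinary walk.
    if (ctor->has_instance) return ctor->has_instance(obj);
    return OrdinaryHasInstance(ctor, obj);
  }

  int main() {
    Obj proto, ctor, instance;
    ctor.prototype_property = &proto;
    instance.proto = &proto;
    assert(InstanceOf(&instance, &ctor));
    ctor.has_instance = [](Obj*) { return false; };  // override flips the result
    assert(!InstanceOf(&instance, &ctor));
    return 0;
  }
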
@@ -1216,9 +1241,29 @@
     Handle<JSFunction> boolean_fun =
         InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
                         isolate->initial_object_prototype(),
-                        Builtins::kIllegal);
+                        Builtins::kBooleanConstructor);
+    boolean_fun->shared()->DontAdaptArguments();
+    boolean_fun->shared()->set_construct_stub(
+        *isolate->builtins()->BooleanConstructor_ConstructStub());
+    boolean_fun->shared()->set_length(1);
     InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
                                      Context::BOOLEAN_FUNCTION_INDEX);
+
+    // Create the %BooleanPrototype%
+    Handle<JSValue> prototype =
+        Handle<JSValue>::cast(factory->NewJSObject(boolean_fun, TENURED));
+    prototype->set_value(isolate->heap()->false_value());
+    Accessors::FunctionSetPrototype(boolean_fun, prototype).Assert();
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(), boolean_fun,
+                          DONT_ENUM);
+
+    // Install the Boolean.prototype methods.
+    SimpleInstallFunction(prototype, "toString",
+                          Builtins::kBooleanPrototypeToString, 0, false);
+    SimpleInstallFunction(prototype, "valueOf",
+                          Builtins::kBooleanPrototypeValueOf, 0, false);
   }
 
   {  // --- S t r i n g ---
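
Note: in the Boolean block above, the new %BooleanPrototype% is itself a Boolean wrapper whose wrapped value is false (that is what prototype->set_value(...) establishes), and the installed toString/valueOf methods simply read the wrapped value. A stand-alone model of that shape; BooleanWrapperSketch is a plain C++ stand-in, not the V8 object model:

  #include <cassert>
  #include <string>

  struct BooleanWrapperSketch {
    bool value = false;  // the wrapped [[BooleanData]]
    std::string ToString() const { return value ? "true" : "false"; }
    bool ValueOf() const { return value; }
  };

  int main() {
    BooleanWrapperSketch boolean_prototype;  // the prototype wraps false
    assert(boolean_prototype.ToString() == "false");

    BooleanWrapperSketch instance{true};     // like: new Boolean(true)
    assert(instance.ValueOf() == true);
    return 0;
  }
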
@@ -1234,6 +1279,7 @@
 
     Handle<Map> string_map =
         Handle<Map>(native_context()->string_function()->initial_map());
+    string_map->set_elements_kind(FAST_STRING_WRAPPER_ELEMENTS);
     Map::EnsureDescriptorSlack(string_map, 1);
 
     PropertyAttributes attribs = static_cast<PropertyAttributes>(
@@ -1250,14 +1296,20 @@
 
   {
     // --- S y m b o l ---
-    Handle<JSFunction> symbol_fun = InstallFunction(
-        global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
-        isolate->initial_object_prototype(), Builtins::kSymbolConstructor);
+    Handle<JSObject> prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    Handle<JSFunction> symbol_fun =
+        InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+                        prototype, Builtins::kSymbolConstructor);
     symbol_fun->shared()->set_construct_stub(
         *isolate->builtins()->SymbolConstructor_ConstructStub());
     symbol_fun->shared()->set_length(1);
     symbol_fun->shared()->DontAdaptArguments();
     native_context()->set_symbol_function(*symbol_fun);
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(), symbol_fun,
+                          DONT_ENUM);
   }
 
   {  // --- D a t e ---
@@ -1290,12 +1342,13 @@
                           Builtins::kDatePrototypeToDateString, 0, false);
     SimpleInstallFunction(prototype, "toTimeString",
                           Builtins::kDatePrototypeToTimeString, 0, false);
-    SimpleInstallFunction(prototype, "toGMTString",
-                          Builtins::kDatePrototypeToUTCString, 0, false);
     SimpleInstallFunction(prototype, "toISOString",
                           Builtins::kDatePrototypeToISOString, 0, false);
-    SimpleInstallFunction(prototype, "toUTCString",
-                          Builtins::kDatePrototypeToUTCString, 0, false);
+    Handle<JSFunction> to_utc_string =
+        SimpleInstallFunction(prototype, "toUTCString",
+                              Builtins::kDatePrototypeToUTCString, 0, false);
+    InstallFunction(prototype, to_utc_string,
+                    factory->InternalizeUtf8String("toGMTString"), DONT_ENUM);
     SimpleInstallFunction(prototype, "getDate", Builtins::kDatePrototypeGetDate,
                           0, true);
     SimpleInstallFunction(prototype, "setDate", Builtins::kDatePrototypeSetDate,
@@ -1504,9 +1557,11 @@
         cons,
         Handle<Object>(native_context()->initial_object_prototype(), isolate));
     cons->shared()->set_instance_class_name(*name);
-    Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
-    DCHECK(json_object->IsJSObject());
-    JSObject::AddProperty(global, name, json_object, DONT_ENUM);
+    Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
+    DCHECK(math->IsJSObject());
+    JSObject::AddProperty(global, name, math, DONT_ENUM);
+    SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
+    SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
   }
 
   {  // -- A r r a y B u f f e r
@@ -1527,16 +1582,16 @@
     TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
 #undef INSTALL_TYPED_ARRAY
 
-    Handle<JSFunction> data_view_fun =
-        InstallFunction(
-            global, "DataView", JS_DATA_VIEW_TYPE,
-            JSDataView::kSizeWithInternalFields,
-            isolate->initial_object_prototype(),
-            Builtins::kIllegal);
+    Handle<JSFunction> data_view_fun = InstallFunction(
+        global, "DataView", JS_DATA_VIEW_TYPE,
+        JSDataView::kSizeWithInternalFields,
+        isolate->initial_object_prototype(), Builtins::kDataViewConstructor);
     InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
                                      Context::DATA_VIEW_FUN_INDEX);
     data_view_fun->shared()->set_construct_stub(
-        *isolate->builtins()->JSBuiltinsConstructStub());
+        *isolate->builtins()->DataViewConstructor_ConstructStub());
+    data_view_fun->shared()->set_length(3);
+    data_view_fun->shared()->DontAdaptArguments();
   }
 
   {  // -- M a p
@@ -1557,7 +1612,7 @@
 
   {  // -- I t e r a t o r R e s u l t
     Handle<Map> map =
-        factory->NewMap(JS_ITERATOR_RESULT_TYPE, JSIteratorResult::kSize);
+        factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize);
     Map::SetPrototype(map, isolate->initial_object_prototype());
     Map::EnsureDescriptorSlack(map, 2);
 
@@ -1573,6 +1628,7 @@
       map->AppendDescriptor(&d);
     }
 
+    map->SetConstructor(native_context()->object_function());
     map->SetInObjectProperties(2);
     native_context()->set_iterator_result_map(*map);
   }
@@ -1618,7 +1674,7 @@
     native_context()->set_bound_function_without_constructor_map(*map);
 
     map = Map::Copy(map, "IsConstructor");
-    map->set_is_constructor();
+    map->set_is_constructor(true);
     native_context()->set_bound_function_with_constructor_map(*map);
   }
 
@@ -1633,18 +1689,20 @@
     function->shared()->set_instance_class_name(*arguments_string);
 
     Handle<Map> map = factory->NewMap(
-        JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize, FAST_ELEMENTS);
+        JS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
     // Create the descriptor array for the arguments object.
     Map::EnsureDescriptorSlack(map, 2);
 
     {  // length
-      DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
-                       DONT_ENUM, Representation::Tagged());
+      DataDescriptor d(factory->length_string(),
+                       JSSloppyArgumentsObject::kLengthIndex, DONT_ENUM,
+                       Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // callee
-      DataDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex,
-                       DONT_ENUM, Representation::Tagged());
+      DataDescriptor d(factory->callee_string(),
+                       JSSloppyArgumentsObject::kCalleeIndex, DONT_ENUM,
+                       Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     // @@iterator method is added later.
@@ -1656,8 +1714,6 @@
     JSFunction::SetInitialMap(function, map,
                               isolate->initial_object_prototype());
 
-    DCHECK(map->GetInObjectProperties() > Heap::kArgumentsCalleeIndex);
-    DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
     DCHECK(!map->is_dictionary_map());
     DCHECK(IsFastObjectElementsKind(map->elements_kind()));
   }
@@ -1693,13 +1749,14 @@
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(
-        JS_OBJECT_TYPE, Heap::kStrictArgumentsObjectSize, FAST_ELEMENTS);
+        JS_OBJECT_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
     // Create the descriptor array for the arguments object.
     Map::EnsureDescriptorSlack(map, 3);
 
     {  // length
-      DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
-                       DONT_ENUM, Representation::Tagged());
+      DataDescriptor d(factory->length_string(),
+                       JSStrictArgumentsObject::kLengthIndex, DONT_ENUM,
+                       Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // callee
@@ -1725,7 +1782,6 @@
 
     native_context()->set_strict_arguments_map(*map);
 
-    DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
     DCHECK(!map->is_dictionary_map());
     DCHECK(IsFastObjectElementsKind(map->elements_kind()));
   }
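
A sketch of the sloppy/strict split these two arguments maps implement, assuming standard arguments-object semantics:

    function sloppy() { return arguments.callee === sloppy; }
    sloppy();  // true -- sloppy arguments carry `callee`
    function strict() { 'use strict'; return arguments; }
    // Reading strict().callee throws a TypeError (poisoned accessor).
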
@@ -1805,7 +1861,7 @@
   Handle<Object> args[] = {global, utils, extras_utils};
 
   return Bootstrapper::CompileNative(isolate, name, source_code,
-                                     arraysize(args), args);
+                                     arraysize(args), args, NATIVES_CODE);
 }
 
 
@@ -1818,7 +1874,7 @@
   Handle<Object> utils = isolate->natives_utils_object();
   Handle<Object> args[] = {global, utils};
   return Bootstrapper::CompileNative(isolate, name, source_code,
-                                     arraysize(args), args);
+                                     arraysize(args), args, NATIVES_CODE);
 }
 
 
@@ -1832,7 +1888,7 @@
   Handle<Object> extras_utils = isolate->extras_utils_object();
   Handle<Object> args[] = {global, binding, extras_utils};
   return Bootstrapper::CompileNative(isolate, name, source_code,
-                                     arraysize(args), args);
+                                     arraysize(args), args, EXTENSION_CODE);
 }
 
 
@@ -1847,13 +1903,13 @@
   Handle<Object> extras_utils = isolate->extras_utils_object();
   Handle<Object> args[] = {global, binding, extras_utils};
   return Bootstrapper::CompileNative(isolate, name, source_code,
-                                     arraysize(args), args);
+                                     arraysize(args), args, EXTENSION_CODE);
 }
 
-
 bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
                                  Handle<String> source, int argc,
-                                 Handle<Object> argv[]) {
+                                 Handle<Object> argv[],
+                                 NativesFlag natives_flag) {
   SuppressDebug compiling_natives(isolate->debug());
   // During genesis, the boilerplate for stack overflow won't work until the
   // environment has been at least partially initialized. Add a stack check
@@ -1870,7 +1926,7 @@
       isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
   Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
       source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
-      context, NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE,
+      context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
       false);
   if (function_info.is_null()) return false;
 
@@ -1928,7 +1984,7 @@
     function_info = Compiler::CompileScript(
         source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
         context, extension, NULL, ScriptCompiler::kNoCompileOptions,
-        NOT_NATIVES_CODE, false);
+        EXTENSION_CODE, false);
     if (function_info.is_null()) return false;
     cache->Add(name, function_info);
   }
@@ -1977,8 +2033,7 @@
   return Handle<JSObject>::cast(value);
 }
 
-
-void Genesis::ConfigureUtilsObject(ContextType context_type) {
+void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
   switch (context_type) {
     // We still need the utils object to find debug functions.
     case DEBUG_CONTEXT:
@@ -2027,24 +2082,6 @@
 #undef EXPORT_PUBLIC_SYMBOL
 
   {
-    Handle<JSFunction> apply = InstallFunction(
-        container, "reflect_apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
-        MaybeHandle<JSObject>(), Builtins::kReflectApply);
-    apply->shared()->DontAdaptArguments();
-    apply->shared()->set_length(3);
-    native_context->set_reflect_apply(*apply);
-  }
-
-  {
-    Handle<JSFunction> construct = InstallFunction(
-        container, "reflect_construct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
-        MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
-    construct->shared()->DontAdaptArguments();
-    construct->shared()->set_length(2);
-    native_context->set_reflect_construct(*construct);
-  }
-
-  {
     Handle<JSFunction> to_string = InstallFunction(
         container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
         MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
@@ -2279,7 +2316,6 @@
   }
 
   INITIALIZE_FLAG(FLAG_harmony_tostring)
-  INITIALIZE_FLAG(FLAG_harmony_tolength)
   INITIALIZE_FLAG(FLAG_harmony_species)
 
 #undef INITIALIZE_FLAG
@@ -2299,13 +2335,15 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_completion)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tolength)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
-
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -2328,13 +2366,6 @@
 }
 
 
-void Genesis::InitializeGlobal_harmony_concat_spreadable() {
-  if (!FLAG_harmony_concat_spreadable) return;
-  InstallPublicSymbol(factory(), native_context(), "isConcatSpreadable",
-                      factory()->is_concat_spreadable_symbol());
-}
-
-
 void Genesis::InitializeGlobal_harmony_regexp_subclass() {
   if (!FLAG_harmony_regexp_subclass) return;
   InstallPublicSymbol(factory(), native_context(), "match",
@@ -2364,6 +2395,15 @@
                            Builtins::kReflectDeleteProperty, 2, true);
   native_context()->set_reflect_delete_property(*delete_property);
 
+  Handle<JSFunction> apply = SimpleCreateFunction(
+      isolate(), factory->apply_string(), Builtins::kReflectApply, 3, false);
+  native_context()->set_reflect_apply(*apply);
+
+  Handle<JSFunction> construct =
+      SimpleCreateFunction(isolate(), factory->construct_string(),
+                           Builtins::kReflectConstruct, 2, false);
+  native_context()->set_reflect_construct(*construct);
+
   if (!FLAG_harmony_reflect) return;
 
   Handle<JSGlobalObject> global(JSGlobalObject::cast(
@@ -2375,6 +2415,8 @@
 
   InstallFunction(reflect, define_property, factory->defineProperty_string());
   InstallFunction(reflect, delete_property, factory->deleteProperty_string());
+  InstallFunction(reflect, apply, factory->apply_string());
+  InstallFunction(reflect, construct, factory->construct_string());
 
   SimpleInstallFunction(reflect, factory->get_string(),
                         Builtins::kReflectGet, 2, false);
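
Moving Reflect.apply and Reflect.construct from self-hosted JS to C++ builtins should not change their observable shape; a sketch, assuming ES2015 semantics:

    Reflect.apply(Math.max, undefined, [1, 2, 3]);  // 3
    Reflect.apply.length;                           // 3
    Reflect.construct(Date, []) instanceof Date;    // true
    Reflect.construct.length;                       // 2
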
@@ -2438,6 +2480,35 @@
 }
 
 
+void Genesis::InitializeGlobal_harmony_object_values_entries() {
+  if (!FLAG_harmony_object_values_entries) return;
+
+  Handle<JSGlobalObject> global(
+      JSGlobalObject::cast(native_context()->global_object()));
+  Isolate* isolate = global->GetIsolate();
+  Factory* factory = isolate->factory();
+
+  Handle<JSFunction> object_function = isolate->object_function();
+  SimpleInstallFunction(object_function, factory->entries_string(),
+                        Builtins::kObjectEntries, 1, false);
+  SimpleInstallFunction(object_function, factory->values_string(),
+                        Builtins::kObjectValues, 1, false);
+}
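
A sketch of the behavior this installs, assuming the build runs with the corresponding --harmony_object_values_entries flag enabled:

    Object.entries({ a: 1, b: 2 });  // [['a', 1], ['b', 2]]
    Object.values({ a: 1, b: 2 });   // [1, 2]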
+
+void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
+  if (!FLAG_harmony_object_own_property_descriptors) return;
+
+  Handle<JSGlobalObject> global(
+      JSGlobalObject::cast(native_context()->global_object()));
+  Isolate* isolate = global->GetIsolate();
+  Factory* factory = isolate->factory();
+
+  Handle<JSFunction> object_function = isolate->object_function();
+  SimpleInstallFunction(object_function,
+                        factory->getOwnPropertyDescriptors_string(),
+                        Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
+}
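
Likewise for the descriptor variant, assuming --harmony_object_own_property_descriptors is enabled:

    const d = Object.getOwnPropertyDescriptors({ x: 1 });
    d.x;  // { value: 1, writable: true, enumerable: true, configurable: true }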
+
 void Genesis::InstallJSProxyMaps() {
   // Allocate the different maps for all Proxy types.
   // Next to the default proxy, we need maps indicating callable and
@@ -2445,7 +2516,7 @@
 
   Handle<Map> proxy_function_map =
       Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
-  proxy_function_map->set_is_constructor();
+  proxy_function_map->set_is_constructor(true);
   native_context()->set_proxy_function_map(*proxy_function_map);
 
   Handle<Map> proxy_map =
@@ -2460,7 +2531,7 @@
 
   Handle<Map> proxy_constructor_map =
       Map::Copy(proxy_callable_map, "constructor Proxy");
-  proxy_constructor_map->set_is_constructor();
+  proxy_constructor_map->set_is_constructor(true);
   native_context()->set_proxy_constructor_map(*proxy_constructor_map);
 }
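
A sketch of what the constructor bit on these proxy maps governs, assuming ES2015 Proxy semantics:

    const p = new Proxy(function () {}, {});
    new p() instanceof Object;  // true -- a proxy of a constructor is a constructor
    // Proxy(target, handler) without `new` throws a TypeError.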
 
@@ -2478,8 +2549,9 @@
   Handle<String> name = factory->Proxy_string();
   Handle<Code> code(isolate->builtins()->ProxyConstructor());
 
-  Handle<JSFunction> proxy_function = factory->NewFunction(
-      isolate->proxy_function_map(), factory->Proxy_string(), code);
+  Handle<JSFunction> proxy_function =
+      factory->NewFunction(isolate->proxy_function_map(),
+                           factory->Proxy_string(), MaybeHandle<Code>(code));
 
   JSFunction::SetInitialMap(proxy_function,
                             Handle<Map>(native_context()->proxy_map(), isolate),
@@ -2574,8 +2646,7 @@
   return array_function;
 }
 
-
-bool Genesis::InstallNatives(ContextType context_type) {
+bool Genesis::InstallNatives(GlobalContextType context_type) {
   HandleScope scope(isolate());
 
   // Set up the utils object as shared container between native scripts.
@@ -2637,10 +2708,11 @@
 
   if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
 
-  auto function_cache =
+  auto template_instantiations_cache =
       ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
                            USE_CUSTOM_MINIMUM_CAPACITY);
-  native_context()->set_function_cache(*function_cache);
+  native_context()->set_template_instantiations_cache(
+      *template_instantiations_cache);
 
   // Store the map for the %ObjectPrototype% after the natives have been compiled
   // and the Object function has been set up.
@@ -2717,6 +2789,91 @@
 
   InstallBuiltinFunctionIds();
 
+  // Create a map for accessor property descriptors (a variant of JSObject
+  // that predefines four properties get, set, configurable and enumerable).
+  {
+    // AccessorPropertyDescriptor initial map.
+    Handle<Map> map =
+        factory()->NewMap(JS_OBJECT_TYPE, JSAccessorPropertyDescriptor::kSize);
+    // Create the descriptor array for the property descriptor object.
+    Map::EnsureDescriptorSlack(map, 4);
+
+    {  // get
+      DataDescriptor d(factory()->get_string(),
+                       JSAccessorPropertyDescriptor::kGetIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // set
+      DataDescriptor d(factory()->set_string(),
+                       JSAccessorPropertyDescriptor::kSetIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // enumerable
+      DataDescriptor d(factory()->enumerable_string(),
+                       JSAccessorPropertyDescriptor::kEnumerableIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // configurable
+      DataDescriptor d(factory()->configurable_string(),
+                       JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    Map::SetPrototype(map, isolate()->initial_object_prototype());
+    map->SetConstructor(native_context()->object_function());
+    map->SetInObjectProperties(4);
+    map->set_unused_property_fields(0);
+
+    native_context()->set_accessor_property_descriptor_map(*map);
+  }
+
+  // Create a map for data property descriptors (a variant of JSObject
+  // that predefines four properties value, writable, configurable and
+  // enumerable).
+  {
+    // DataPropertyDescriptor initial map.
+    Handle<Map> map =
+        factory()->NewMap(JS_OBJECT_TYPE, JSDataPropertyDescriptor::kSize);
+    // Create the descriptor array for the property descriptor object.
+    Map::EnsureDescriptorSlack(map, 4);
+
+    {  // value
+      DataDescriptor d(factory()->value_string(),
+                       JSDataPropertyDescriptor::kValueIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // writable
+      DataDescriptor d(factory()->writable_string(),
+                       JSDataPropertyDescriptor::kWritableIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // enumerable
+      DataDescriptor d(factory()->enumerable_string(),
+                       JSDataPropertyDescriptor::kEnumerableIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // configurable
+      DataDescriptor d(factory()->configurable_string(),
+                       JSDataPropertyDescriptor::kConfigurableIndex, NONE,
+                       Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    Map::SetPrototype(map, isolate()->initial_object_prototype());
+    map->SetConstructor(native_context()->object_function());
+    map->SetInObjectProperties(4);
+    map->set_unused_property_fields(0);
+
+    native_context()->set_data_property_descriptor_map(*map);
+  }
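
These two maps back the objects produced for property descriptors, so descriptor objects should come out with a fixed property order; a sketch, assuming spec-conformant FromPropertyDescriptor behavior:

    Object.keys(Object.getOwnPropertyDescriptor({ x: 1 }, 'x'));
    // ['value', 'writable', 'enumerable', 'configurable']
    Object.keys(Object.getOwnPropertyDescriptor({ get x() {} }, 'x'));
    // ['get', 'set', 'enumerable', 'configurable']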
+
   // Create a constructor for RegExp results (a variant of Array that
   // predefines the two properties index and match).
   {
@@ -2745,7 +2902,7 @@
           array_function->initial_map()->instance_descriptors());
       Handle<String> length = factory()->length_string();
       int old = array_descriptors->SearchWithCache(
-          *length, array_function->initial_map());
+          isolate(), *length, array_function->initial_map());
       DCHECK(old != DescriptorArray::kNotFound);
       AccessorConstantDescriptor desc(
           length, handle(array_descriptors->GetValue(old), isolate()),
@@ -2817,11 +2974,13 @@
   static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
                                                   nullptr};
   static const char* harmony_tostring_natives[] = {nullptr};
+  static const char* harmony_iterator_close_natives[] = {nullptr};
   static const char* harmony_sloppy_natives[] = {nullptr};
   static const char* harmony_sloppy_function_natives[] = {nullptr};
   static const char* harmony_sloppy_let_natives[] = {nullptr};
   static const char* harmony_species_natives[] = {"native harmony-species.js",
                                                   nullptr};
+  static const char* harmony_tailcalls_natives[] = {nullptr};
   static const char* harmony_unicode_regexps_natives[] = {
       "native harmony-unicode-regexps.js", nullptr};
   static const char* harmony_default_parameters_natives[] = {nullptr};
@@ -2833,17 +2992,20 @@
       "native harmony-object-observe.js", nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
       "native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
-  static const char* harmony_concat_spreadable_natives[] = {nullptr};
   static const char* harmony_simd_natives[] = {"native harmony-simd.js",
                                                nullptr};
-  static const char* harmony_tolength_natives[] = {nullptr};
-  static const char* harmony_completion_natives[] = {nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
   static const char* harmony_regexp_subclass_natives[] = {nullptr};
   static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
+  static const char* harmony_instanceof_natives[] = {nullptr};
+  static const char* harmony_regexp_property_natives[] = {nullptr};
   static const char* harmony_function_name_natives[] = {nullptr};
+  static const char* harmony_function_sent_natives[] = {nullptr};
   static const char* promise_extra_natives[] = {"native promise-extra.js",
                                                 nullptr};
+  static const char* harmony_object_values_entries_natives[] = {nullptr};
+  static const char* harmony_object_own_property_descriptors_natives[] = {
+      nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3371,12 +3533,11 @@
   bool enabled_;
 };
 
-
 Genesis::Genesis(Isolate* isolate,
                  MaybeHandle<JSGlobalProxy> maybe_global_proxy,
                  v8::Local<v8::ObjectTemplate> global_proxy_template,
                  v8::ExtensionConfiguration* extensions,
-                 ContextType context_type)
+                 GlobalContextType context_type)
     : isolate_(isolate), active_(isolate->bootstrapper()) {
   NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
   result_ = Handle<Context>::null();
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 44f0f1b..d1bf201 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -61,7 +61,7 @@
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };
 
-enum ContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
+enum GlobalContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
 
 // The Bootstrapper is the public interface for creating a JavaScript global
 // context.
@@ -80,7 +80,7 @@
       MaybeHandle<JSGlobalProxy> maybe_global_proxy,
       v8::Local<v8::ObjectTemplate> global_object_template,
       v8::ExtensionConfiguration* extensions,
-      ContextType context_type = FULL_CONTEXT);
+      GlobalContextType context_type = FULL_CONTEXT);
 
   // Detach the environment from its outer global object.
   void DetachGlobal(Handle<Context> env);
@@ -109,7 +109,7 @@
 
   static bool CompileNative(Isolate* isolate, Vector<const char> name,
                             Handle<String> source, int argc,
-                            Handle<Object> argv[]);
+                            Handle<Object> argv[], NativesFlag natives_flag);
   static bool CompileBuiltin(Isolate* isolate, int index);
   static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
   static bool CompileExtraBuiltin(Isolate* isolate, int index);
diff --git a/src/builtins.cc b/src/builtins.cc
index 34b370f..23c41f7 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -59,7 +59,8 @@
     return Arguments::at<Object>(0);
   }
 
-  Handle<JSFunction> target();
+  template <class S>
+  Handle<S> target();
   Handle<HeapObject> new_target();
 
   // Gets the total number of arguments including the receiver (but
@@ -81,8 +82,9 @@
 }
 
 template <>
-Handle<JSFunction> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
-  return Arguments::at<JSFunction>(Arguments::length() - 1);
+template <class S>
+Handle<S> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
+  return Arguments::at<S>(Arguments::length() - 1);
 }
 
 template <>
@@ -103,9 +105,10 @@
 }
 
 template <>
-Handle<JSFunction>
+template <class S>
+Handle<S>
 BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
-  return Arguments::at<JSFunction>(Arguments::length() - 2);
+  return Arguments::at<S>(Arguments::length() - 2);
 }
 
 template <>
@@ -134,17 +137,21 @@
 // In the body of the builtin function the arguments can be accessed
 // through the BuiltinArguments object args.
 
-#define BUILTIN(name)                                            \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
-      name##ArgumentsType args, Isolate* isolate);               \
-  MUST_USE_RESULT static Object* Builtin_##name(                 \
-      int args_length, Object** args_object, Isolate* isolate) { \
-    name##ArgumentsType args(args_length, args_object);          \
-    return Builtin_Impl_##name(args, isolate);                   \
-  }                                                              \
-  MUST_USE_RESULT static Object* Builtin_Impl_##name(            \
-      name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name)                                                          \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
+                                                     Isolate* isolate);        \
+  MUST_USE_RESULT static Object* Builtin_##name(                               \
+      int args_length, Object** args_object, Isolate* isolate) {               \
+    isolate->counters()->runtime_calls()->Increment();                         \
+    RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();       \
+    RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name);              \
+    name##ArgumentsType args(args_length, args_object);                        \
+    Object* value = Builtin_Impl_##name(args, isolate);                        \
+    return value;                                                              \
+  }                                                                            \
+                                                                               \
+  MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
+                                                     Isolate* isolate)
 
 // ----------------------------------------------------------------------------
 
@@ -194,7 +201,7 @@
   Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
   if (object->map() != arguments_map) return false;
   DCHECK(object->HasFastElements());
-  Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+  Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
   if (!len_obj->IsSmi()) return false;
   *out = Max(0, Smi::cast(len_obj)->value());
   return *out <= object->elements()->length();
@@ -208,12 +215,12 @@
     JSObject* current = iter->GetCurrent<JSObject>();
     if (current->IsAccessCheckNeeded()) return false;
     if (current->HasIndexedInterceptor()) return false;
+    if (current->HasStringWrapperElements()) return false;
     if (current->elements()->length() != 0) return false;
   }
   return true;
 }
 
-
 inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
                                               JSArray* receiver) {
   DisallowHeapAllocation no_gc;
@@ -231,12 +238,46 @@
   return PrototypeHasNoElements(&iter);
 }
 
+inline bool HasSimpleElements(JSObject* current) {
+  if (current->IsAccessCheckNeeded()) return false;
+  if (current->HasIndexedInterceptor()) return false;
+  if (current->HasStringWrapperElements()) return false;
+  if (current->GetElementsAccessor()->HasAccessors(current)) return false;
+  return true;
+}
+
+inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
+                                          JSReceiver* receiver) {
+  // Check that we have no accessors on the receiver's elements.
+  JSObject* object = JSObject::cast(receiver);
+  if (!HasSimpleElements(object)) return false;
+  // Check that there are no elements on the prototype chain.
+  DisallowHeapAllocation no_gc;
+  PrototypeIterator iter(isolate, receiver);
+  return PrototypeHasNoElements(&iter);
+}
+
+inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
+  // Check that the receiver and all prototypes have only simple elements.
+  DisallowHeapAllocation no_gc;
+  PrototypeIterator iter(isolate, receiver,
+                         PrototypeIterator::START_AT_RECEIVER);
+  for (; !iter.IsAtEnd(); iter.Advance()) {
+    if (iter.GetCurrent()->IsJSProxy()) return false;
+    JSObject* current = iter.GetCurrent<JSObject>();
+    if (!HasSimpleElements(current)) return false;
+  }
+  return true;
+}
 
 // Returns empty handle if not applicable.
 MUST_USE_RESULT
 inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
     Isolate* isolate, Handle<Object> receiver, Arguments* args,
     int first_added_arg) {
+  // We explicitly add a HandleScope to avoid creating several copies of the
+  // same handle, which would otherwise cause issues when left-trimming later on.
+  HandleScope scope(isolate);
   if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
   // If there may be elements accessors in the prototype chain, the fast path
@@ -250,12 +291,18 @@
   Handle<FixedArrayBase> elms(array->elements(), isolate);
   Map* map = elms->map();
   if (map == heap->fixed_array_map()) {
-    if (args == NULL || array->HasFastObjectElements()) return elms;
+    if (args == NULL || array->HasFastObjectElements()) {
+      return scope.CloseAndEscape(elms);
+    }
   } else if (map == heap->fixed_cow_array_map()) {
     elms = JSObject::EnsureWritableFastElements(array);
-    if (args == NULL || array->HasFastObjectElements()) return elms;
+    if (args == NULL || array->HasFastObjectElements()) {
+      return scope.CloseAndEscape(elms);
+    }
   } else if (map == heap->fixed_double_array_map()) {
-    if (args == NULL) return elms;
+    if (args == NULL) {
+      return scope.CloseAndEscape(elms);
+    }
   } else {
     return MaybeHandle<FixedArrayBase>();
   }
@@ -269,7 +316,9 @@
   // Need to ensure that the arguments passed in args can be contained in
   // the array.
   int args_length = args->length();
-  if (first_added_arg >= args_length) return handle(array->elements(), isolate);
+  if (first_added_arg >= args_length) {
+    return scope.CloseAndEscape(elms);
+  }
 
   ElementsKind origin_kind = array->map()->elements_kind();
   DCHECK(!IsFastObjectElementsKind(origin_kind));
@@ -292,9 +341,9 @@
   }
   if (target_kind != origin_kind) {
     JSObject::TransitionElementsKind(array, target_kind);
-    return handle(array->elements(), isolate);
+    elms = handle(array->elements(), isolate);
   }
-  return elms;
+  return scope.CloseAndEscape(elms);
 }
 
 
@@ -458,19 +507,14 @@
   int relative_end = 0;
   bool is_sloppy_arguments = false;
 
-  // TODO(littledan): Look up @@species only once, not once here and
-  // again in the JS builtin. Pass the species out?
-  Handle<Object> species;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
-  if (*species != isolate->context()->native_context()->array_function()) {
-    return CallJsIntrinsic(isolate, isolate->array_slice(), args);
-  }
   if (receiver->IsJSArray()) {
     DisallowHeapAllocation no_gc;
     JSArray* array = JSArray::cast(*receiver);
     if (!array->HasFastElements() ||
-        !IsJSArrayFastElementMovingAllowed(isolate, array)) {
+        !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+        !isolate->IsArraySpeciesLookupChainIntact() ||
+        // If this is a subclass of Array, then call out to JS
+        !array->map()->new_target_is_base()) {
       AllowHeapAllocation allow_allocation;
       return CallJsIntrinsic(isolate, isolate->array_slice(), args);
     }
@@ -548,15 +592,11 @@
   MaybeHandle<FixedArrayBase> maybe_elms_obj =
       EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
   Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
-    return CallJsIntrinsic(isolate, isolate->array_splice(), args);
-  }
-  // TODO(littledan): Look up @@species only once, not once here and
-  // again in the JS builtin. Pass the species out?
-  Handle<Object> species;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
-  if (*species != isolate->context()->native_context()->array_function()) {
+  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+      // If this is a subclass of Array, then call out to JS
+      !JSArray::cast(*receiver)->map()->new_target_is_base() ||
+      // If the @@species lookup chain has been modified, call out to JS
+      !isolate->IsArraySpeciesLookupChainIntact()) {
     return CallJsIntrinsic(isolate, isolate->array_splice(), args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
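
The new guards send any Array subclass (and any context where the @@species lookup chain has been touched) through the JS fallback, which keeps subclassing observable; a sketch assuming ES2015 @@species semantics:

    class MyArray extends Array {}
    const a = MyArray.of(1, 2, 3);
    a.slice(1) instanceof MyArray;      // true
    a.splice(0, 1) instanceof MyArray;  // true -- removed elements, same species
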
@@ -627,28 +667,42 @@
  */
 class ArrayConcatVisitor {
  public:
-  ArrayConcatVisitor(Isolate* isolate, Handle<FixedArray> storage,
+  ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
                      bool fast_elements)
       : isolate_(isolate),
-        storage_(Handle<FixedArray>::cast(
-            isolate->global_handles()->Create(*storage))),
+        storage_(isolate->global_handles()->Create(*storage)),
         index_offset_(0u),
         bit_field_(FastElementsField::encode(fast_elements) |
-                   ExceedsLimitField::encode(false)) {}
+                   ExceedsLimitField::encode(false) |
+                   IsFixedArrayField::encode(storage->IsFixedArray())) {
+    DCHECK(!(this->fast_elements() && !is_fixed_array()));
+  }
 
   ~ArrayConcatVisitor() { clear_storage(); }
 
-  void visit(uint32_t i, Handle<Object> elm) {
-    if (i >= JSObject::kMaxElementCount - index_offset_) {
-      set_exceeds_array_limit(true);
-      return;
-    }
+  bool visit(uint32_t i, Handle<Object> elm) {
     uint32_t index = index_offset_ + i;
 
+    if (!is_fixed_array()) {
+      Handle<Object> element_value;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate_, element_value,
+          Object::SetElement(isolate_, storage_, index, elm, STRICT), false);
+      return true;
+    }
+
+    if (i >= JSObject::kMaxElementCount - index_offset_) {
+      set_exceeds_array_limit(true);
+      // No exception has been thrown at this point. Return true so the
+      // caller breaks out and throws; a false return from visit() would
+      // imply that there is already a pending exception.
+      return true;
+    }
+
     if (fast_elements()) {
-      if (index < static_cast<uint32_t>(storage_->length())) {
-        storage_->set(index, *elm);
-        return;
+      if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
+        storage_fixed_array()->set(index, *elm);
+        return true;
       }
       // Our initial estimate of length was foiled, possibly by
       // getters on the arrays increasing the length of later arrays
@@ -669,6 +723,7 @@
       clear_storage();
       set_storage(*result);
     }
+    return true;
   }
 
   void increase_index_offset(uint32_t delta) {
@@ -692,6 +747,7 @@
   }
 
   Handle<JSArray> ToArray() {
+    DCHECK(is_fixed_array());
     Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
     Handle<Object> length =
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
@@ -699,15 +755,26 @@
         array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
     array->set_map(*map);
     array->set_length(*length);
-    array->set_elements(*storage_);
+    array->set_elements(*storage_fixed_array());
     return array;
   }
 
+  // Storage is either a FixedArray (if is_fixed_array()) or a JSReceiver
+  // (otherwise).
+  Handle<FixedArray> storage_fixed_array() {
+    DCHECK(is_fixed_array());
+    return Handle<FixedArray>::cast(storage_);
+  }
+  Handle<JSReceiver> storage_jsreceiver() {
+    DCHECK(!is_fixed_array());
+    return Handle<JSReceiver>::cast(storage_);
+  }
+
  private:
   // Convert storage to dictionary mode.
   void SetDictionaryMode() {
-    DCHECK(fast_elements());
-    Handle<FixedArray> current_storage(*storage_);
+    DCHECK(fast_elements() && is_fixed_array());
+    Handle<FixedArray> current_storage = storage_fixed_array();
     Handle<SeededNumberDictionary> slow_storage(
         SeededNumberDictionary::New(isolate_, current_storage->length()));
     uint32_t current_length = static_cast<uint32_t>(current_storage->length());
@@ -735,12 +802,13 @@
   }
 
   inline void set_storage(FixedArray* storage) {
-    storage_ =
-        Handle<FixedArray>::cast(isolate_->global_handles()->Create(storage));
+    DCHECK(is_fixed_array());
+    storage_ = isolate_->global_handles()->Create(storage);
   }
 
   class FastElementsField : public BitField<bool, 0, 1> {};
   class ExceedsLimitField : public BitField<bool, 1, 1> {};
+  class IsFixedArrayField : public BitField<bool, 2, 1> {};
 
   bool fast_elements() const { return FastElementsField::decode(bit_field_); }
   void set_fast_elements(bool fast) {
@@ -749,9 +817,10 @@
   void set_exceeds_array_limit(bool exceeds) {
     bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
   }
+  bool is_fixed_array() const { return IsFixedArrayField::decode(bit_field_); }
 
   Isolate* isolate_;
-  Handle<FixedArray> storage_;  // Always a global handle.
+  Handle<Object> storage_;  // Always a global handle.
   // Index after last seen index. Always less than or equal to
   // JSObject::kMaxElementCount.
   uint32_t index_offset_;
@@ -806,14 +875,20 @@
       }
       break;
     }
-    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
-    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
 
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
       // External arrays are always dense.
       return length;
+    case NO_ELEMENTS:
+      return 0;
+    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
+      UNREACHABLE();
+      return 0;
   }
   // As an estimate, we assume that the prototype doesn't contain any
   // inherited elements.
@@ -821,48 +896,6 @@
 }
 
 
-template <class ExternalArrayClass, class ElementType>
-void IterateTypedArrayElements(Isolate* isolate, Handle<JSObject> receiver,
-                               bool elements_are_ints,
-                               bool elements_are_guaranteed_smis,
-                               ArrayConcatVisitor* visitor) {
-  Handle<ExternalArrayClass> array(
-      ExternalArrayClass::cast(receiver->elements()));
-  uint32_t len = static_cast<uint32_t>(array->length());
-
-  DCHECK(visitor != NULL);
-  if (elements_are_ints) {
-    if (elements_are_guaranteed_smis) {
-      for (uint32_t j = 0; j < len; j++) {
-        HandleScope loop_scope(isolate);
-        Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
-                      isolate);
-        visitor->visit(j, e);
-      }
-    } else {
-      for (uint32_t j = 0; j < len; j++) {
-        HandleScope loop_scope(isolate);
-        int64_t val = static_cast<int64_t>(array->get_scalar(j));
-        if (Smi::IsValid(static_cast<intptr_t>(val))) {
-          Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
-          visitor->visit(j, e);
-        } else {
-          Handle<Object> e =
-              isolate->factory()->NewNumber(static_cast<ElementType>(val));
-          visitor->visit(j, e);
-        }
-      }
-    }
-  } else {
-    for (uint32_t j = 0; j < len; j++) {
-      HandleScope loop_scope(isolate);
-      Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
-      visitor->visit(j, e);
-    }
-  }
-}
-
-
 // Used for sorting indices in a List<uint32_t>.
 int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
   uint32_t a = *ap;
@@ -953,6 +986,28 @@
       }
       break;
     }
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS: {
+      DCHECK(object->IsJSValue());
+      Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+      DCHECK(js_value->value()->IsString());
+      Handle<String> string(String::cast(js_value->value()), isolate);
+      uint32_t length = static_cast<uint32_t>(string->length());
+      uint32_t i = 0;
+      uint32_t limit = Min(length, range);
+      for (; i < limit; i++) {
+        indices->Add(i);
+      }
+      ElementsAccessor* accessor = object->GetElementsAccessor();
+      for (; i < range; i++) {
+        if (accessor->HasElement(object, i)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
+    case NO_ELEMENTS:
+      break;
   }
 
   PrototypeIterator iter(isolate, object);
@@ -976,7 +1031,7 @@
       ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
                                        Object::GetElement(isolate, receiver, i),
                                        false);
-      visitor->visit(i, element_value);
+      if (!visitor->visit(i, element_value)) return false;
     }
   }
   visitor->increase_index_offset(length);
@@ -1013,11 +1068,11 @@
     if (!val->ToUint32(&length)) {
       length = 0;
     }
+    // TODO(cbruni): handle other element kinds as well
+    return IterateElementsSlow(isolate, receiver, length, visitor);
   }
 
-  if (!(receiver->IsJSArray() || receiver->IsJSTypedArray())) {
-    // For classes which are not known to be safe to access via elements alone,
-    // use the slow case.
+  if (!HasOnlySimpleElements(isolate, *receiver)) {
     return IterateElementsSlow(isolate, receiver, length, visitor);
   }
   Handle<JSObject> array = Handle<JSObject>::cast(receiver);
@@ -1031,12 +1086,12 @@
       // to check the prototype for missing elements.
       Handle<FixedArray> elements(FixedArray::cast(array->elements()));
       int fast_length = static_cast<int>(length);
-      DCHECK(fast_length <= elements->length());
+      DCHECK_LE(fast_length, elements->length());
       for (int j = 0; j < fast_length; j++) {
         HandleScope loop_scope(isolate);
         Handle<Object> element_value(elements->get(j), isolate);
         if (!element_value->IsTheHole()) {
-          visitor->visit(j, element_value);
+          if (!visitor->visit(j, element_value)) return false;
         } else {
           Maybe<bool> maybe = JSReceiver::HasElement(array, j);
           if (!maybe.IsJust()) return false;
@@ -1046,7 +1101,7 @@
             ASSIGN_RETURN_ON_EXCEPTION_VALUE(
                 isolate, element_value, Object::GetElement(isolate, array, j),
                 false);
-            visitor->visit(j, element_value);
+            if (!visitor->visit(j, element_value)) return false;
           }
         }
       }
@@ -1072,7 +1127,7 @@
           double double_value = elements->get_scalar(j);
           Handle<Object> element_value =
               isolate->factory()->NewNumber(double_value);
-          visitor->visit(j, element_value);
+          if (!visitor->visit(j, element_value)) return false;
         } else {
           Maybe<bool> maybe = JSReceiver::HasElement(array, j);
           if (!maybe.IsJust()) return false;
@@ -1083,21 +1138,14 @@
             ASSIGN_RETURN_ON_EXCEPTION_VALUE(
                 isolate, element_value, Object::GetElement(isolate, array, j),
                 false);
-            visitor->visit(j, element_value);
+            if (!visitor->visit(j, element_value)) return false;
           }
         }
       }
       break;
     }
+
     case DICTIONARY_ELEMENTS: {
-      // CollectElementIndices() can't be called when there's a JSProxy
-      // on the prototype chain.
-      for (PrototypeIterator iter(isolate, array); !iter.IsAtEnd();
-           iter.Advance()) {
-        if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
-          return IterateElementsSlow(isolate, array, length, visitor);
-        }
-      }
       Handle<SeededNumberDictionary> dict(array->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
       // Collect all indices in the object and the prototypes less
@@ -1112,7 +1160,7 @@
         Handle<Object> element;
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
             isolate, element, Object::GetElement(isolate, array, index), false);
-        visitor->visit(index, element);
+        if (!visitor->visit(index, element)) return false;
         // Skip to next different index (i.e., omit duplicates).
         do {
           j++;
@@ -1120,55 +1168,6 @@
       }
       break;
     }
-    case UINT8_CLAMPED_ELEMENTS: {
-      Handle<FixedUint8ClampedArray> pixels(
-          FixedUint8ClampedArray::cast(array->elements()));
-      for (uint32_t j = 0; j < length; j++) {
-        Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
-        visitor->visit(j, e);
-      }
-      break;
-    }
-    case INT8_ELEMENTS: {
-      IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, array, true,
-                                                        true, visitor);
-      break;
-    }
-    case UINT8_ELEMENTS: {
-      IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, array, true,
-                                                          true, visitor);
-      break;
-    }
-    case INT16_ELEMENTS: {
-      IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, array, true,
-                                                          true, visitor);
-      break;
-    }
-    case UINT16_ELEMENTS: {
-      IterateTypedArrayElements<FixedUint16Array, uint16_t>(
-          isolate, array, true, true, visitor);
-      break;
-    }
-    case INT32_ELEMENTS: {
-      IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, array, true,
-                                                          false, visitor);
-      break;
-    }
-    case UINT32_ELEMENTS: {
-      IterateTypedArrayElements<FixedUint32Array, uint32_t>(
-          isolate, array, true, false, visitor);
-      break;
-    }
-    case FLOAT32_ELEMENTS: {
-      IterateTypedArrayElements<FixedFloat32Array, float>(isolate, array, false,
-                                                          false, visitor);
-      break;
-    }
-    case FLOAT64_ELEMENTS: {
-      IterateTypedArrayElements<FixedFloat64Array, double>(
-          isolate, array, false, false, visitor);
-      break;
-    }
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
       for (uint32_t index = 0; index < length; index++) {
@@ -1176,10 +1175,21 @@
         Handle<Object> element;
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
             isolate, element, Object::GetElement(isolate, array, index), false);
-        visitor->visit(index, element);
+        if (!visitor->visit(index, element)) return false;
       }
       break;
     }
+    case NO_ELEMENTS:
+      break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+      return IterateElementsSlow(isolate, receiver, length, visitor);
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
+      // |array| is guaranteed to be an array or typed array.
+      UNREACHABLE();
+      break;
   }
   visitor->increase_index_offset(length);
   return true;
@@ -1187,8 +1197,6 @@
 
 
 bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
-  DCHECK(isolate->IsFastArrayConstructorPrototypeChainIntact());
-  if (!FLAG_harmony_concat_spreadable) return false;
   Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
   Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
   return maybe.FromMaybe(false);
@@ -1198,21 +1206,22 @@
 static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
   HandleScope handle_scope(isolate);
   if (!obj->IsJSReceiver()) return Just(false);
-  if (FLAG_harmony_concat_spreadable) {
-    Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
-    Handle<Object> value;
-    MaybeHandle<Object> maybeValue =
-        i::Runtime::GetObjectProperty(isolate, obj, key);
-    if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
-    if (!value->IsUndefined()) return Just(value->BooleanValue());
-  }
+  Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+  Handle<Object> value;
+  MaybeHandle<Object> maybeValue =
+      i::Runtime::GetObjectProperty(isolate, obj, key);
+  if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+  if (!value->IsUndefined()) return Just(value->BooleanValue());
   return Object::IsArray(obj);
 }
 
 
-Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
+Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
+                         Isolate* isolate) {
   int argument_count = args->length();
 
+  bool is_array_species = *species == isolate->context()->array_function();
+
   // Pass 1: estimate the length and number of elements of the result.
   // The actual length can be larger if any of the arguments have getters
   // that mutate other arguments (but will otherwise be precise).
@@ -1232,17 +1241,14 @@
       length_estimate = static_cast<uint32_t>(array->length()->Number());
       if (length_estimate != 0) {
         ElementsKind array_kind =
-            GetPackedElementsKind(array->map()->elements_kind());
+            GetPackedElementsKind(array->GetElementsKind());
         kind = GetMoreGeneralElementsKind(kind, array_kind);
       }
       element_estimate = EstimateElementCount(array);
     } else {
       if (obj->IsHeapObject()) {
-        if (obj->IsNumber()) {
-          kind = GetMoreGeneralElementsKind(kind, FAST_DOUBLE_ELEMENTS);
-        } else {
-          kind = GetMoreGeneralElementsKind(kind, FAST_ELEMENTS);
-        }
+        kind = GetMoreGeneralElementsKind(
+            kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
       }
       length_estimate = 1;
       element_estimate = 1;
@@ -1263,7 +1269,8 @@
   // If estimated number of elements is more than half of length, a
   // fixed array (fast case) is more time and space-efficient than a
   // dictionary.
-  bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
+  bool fast_case =
+      is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
 
   if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
     Handle<FixedArrayBase> storage =
@@ -1284,7 +1291,7 @@
         } else {
           JSArray* array = JSArray::cast(*obj);
           uint32_t length = static_cast<uint32_t>(array->length()->Number());
-          switch (array->map()->elements_kind()) {
+          switch (array->GetElementsKind()) {
             case FAST_HOLEY_DOUBLE_ELEMENTS:
             case FAST_DOUBLE_ELEMENTS: {
               // Empty array is FixedArray but not FixedDoubleArray.
@@ -1325,6 +1332,7 @@
             case FAST_HOLEY_ELEMENTS:
             case FAST_ELEMENTS:
             case DICTIONARY_ELEMENTS:
+            case NO_ELEMENTS:
               DCHECK_EQ(0u, length);
               break;
             default:
@@ -1335,30 +1343,30 @@
       }
     }
     if (!failure) {
-      Handle<JSArray> array = isolate->factory()->NewJSArray(0);
-      Smi* length = Smi::FromInt(j);
-      Handle<Map> map;
-      map = JSObject::GetElementsTransitionMap(array, kind);
-      array->set_map(*map);
-      array->set_length(length);
-      array->set_elements(*storage);
-      return *array;
+      return *isolate->factory()->NewJSArrayWithElements(storage, kind, j);
     }
     // In case of failure, fall through.
   }
 
-  Handle<FixedArray> storage;
+  Handle<Object> storage;
   if (fast_case) {
     // The backing storage array must have non-existing elements to preserve
     // holes across concat operations.
     storage =
         isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
-  } else {
+  } else if (is_array_species) {
     // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
     uint32_t at_least_space_for =
         estimate_nof_elements + (estimate_nof_elements >> 2);
-    storage = Handle<FixedArray>::cast(
-        SeededNumberDictionary::New(isolate, at_least_space_for));
+    storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+  } else {
+    DCHECK(species->IsConstructor());
+    Handle<Object> length(Smi::FromInt(0), isolate);
+    Handle<Object> storage_object;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, storage_object,
+        Execution::New(isolate, species, species, 1, &length));
+    storage = storage_object;
   }
 
   ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -1382,28 +1390,33 @@
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
   }
-  return *visitor.ToArray();
+
+  if (is_array_species) {
+    return *visitor.ToArray();
+  } else {
+    return *visitor.storage_jsreceiver();
+  }
 }
 
 
 MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
-  if (!isolate->IsFastArrayConstructorPrototypeChainIntact()) {
-    return MaybeHandle<JSArray>();
-  }
   int n_arguments = args->length();
   int result_len = 0;
   {
     DisallowHeapAllocation no_gc;
-    Object* array_proto = isolate->array_function()->prototype();
     // Iterate through all the arguments performing checks
     // and calculating total length.
     for (int i = 0; i < n_arguments; i++) {
       Object* arg = (*args)[i];
       if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+      if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+        return MaybeHandle<JSArray>();
+      }
+      // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
+      if (!JSObject::cast(arg)->HasFastElements()) {
+        return MaybeHandle<JSArray>();
+      }
       Handle<JSArray> array(JSArray::cast(arg), isolate);
-      if (!array->HasFastElements()) return MaybeHandle<JSArray>();
-      PrototypeIterator iter(isolate, arg);
-      if (iter.GetCurrent() != array_proto) return MaybeHandle<JSArray>();
       if (HasConcatSpreadableModifier(isolate, array)) {
         return MaybeHandle<JSArray>();
       }
@@ -1428,26 +1441,37 @@
 
 }  // namespace
 
+
 // ES6 22.1.3.1 Array.prototype.concat
 BUILTIN(ArrayConcat) {
   HandleScope scope(isolate);
 
-  Handle<Object> receiver;
-  if (!Object::ToObject(isolate, handle(args[0], isolate))
-           .ToHandle(&receiver)) {
+  Handle<Object> receiver = args.receiver();
+  // TODO(bmeurer): Do we really care about the exact exception message here?
+  if (receiver->IsNull() || receiver->IsUndefined()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
                                   "Array.prototype.concat")));
   }
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, receiver, Object::ToObject(isolate, args.receiver()));
   args[0] = *receiver;
 
   Handle<JSArray> result_array;
-  if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
-    return *result_array;
+
+  // Reading @@species happens before anything else with a side effect, so
+  // we can do it here to determine whether to take the fast path.
+  Handle<Object> species;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+  if (*species == isolate->context()->native_context()->array_function()) {
+    if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+      return *result_array;
+    }
+    if (isolate->has_pending_exception()) return isolate->heap()->exception();
   }
-  if (isolate->has_pending_exception()) return isolate->heap()->exception();
-  return Slow_ArrayConcat(&args, isolate);
+  return Slow_ArrayConcat(&args, species, isolate);
 }
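
A sketch of the two concat paths this builtin now selects between, assuming ES2015 @@species and @@isConcatSpreadable semantics:

    class MyArray extends Array {}
    MyArray.of(1).concat([2]) instanceof MyArray;  // true -- species honored
    const b = [3, 4];
    b[Symbol.isConcatSpreadable] = false;
    [1, 2].concat(b);  // [1, 2, [3, 4]] -- b appended without spreading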
 
 
@@ -1461,6 +1485,77 @@
   return *isolate->factory()->ToBoolean(result.FromJust());
 }
 
+namespace {
+
+MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
+                                       Handle<Object> next_source) {
+  // Non-empty strings are the only non-JSReceivers that need to be handled
+  // explicitly by Object.assign.
+  if (!next_source->IsJSReceiver()) {
+    return Just(!next_source->IsString() ||
+                String::cast(*next_source)->length() == 0);
+  }
+
+  Isolate* isolate = to->GetIsolate();
+  Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+
+  if (!map->IsJSObjectMap()) return Just(false);
+  if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+  Handle<JSObject> from = Handle<JSObject>::cast(next_source);
+  if (from->elements() != isolate->heap()->empty_fixed_array()) {
+    return Just(false);
+  }
+
+  Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+  int length = map->NumberOfOwnDescriptors();
+
+  bool stable = true;
+
+  for (int i = 0; i < length; i++) {
+    Handle<Name> next_key(descriptors->GetKey(i), isolate);
+    Handle<Object> prop_value;
+    // Directly decode from the descriptor array if |from| did not change shape.
+    if (stable) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (!details.IsEnumerable()) continue;
+      if (details.kind() == kData) {
+        if (details.location() == kDescriptor) {
+          prop_value = handle(descriptors->GetValue(i), isolate);
+        } else {
+          Representation representation = details.representation();
+          FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+          prop_value = JSObject::FastPropertyAt(from, representation, index);
+        }
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, prop_value,
+                                         Object::GetProperty(from, next_key),
+                                         Nothing<bool>());
+        stable = from->map() == *map;
+      }
+    } else {
+      // If the map did change, do a slower lookup. We are still guaranteed that
+      // the object has a simple shape, and that the key is a name.
+      LookupIterator it(from, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      if (!it.IsFound()) continue;
+      DCHECK(it.state() == LookupIterator::DATA ||
+             it.state() == LookupIterator::ACCESSOR);
+      if (!it.IsEnumerable()) continue;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+    }
+    LookupIterator it(to, next_key);
+    bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+    Maybe<bool> result = Object::SetProperty(
+        &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+    if (result.IsNothing()) return result;
+    if (stable && call_to_js) stable = from->map() == *map;
+  }
+
+  return Just(true);
+}
+
+}  // namespace
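
FastAssign only short-circuits shapes it can copy without observable difference; anything with getters still runs [[Get]] in source order. A sketch of the semantics both paths must preserve (assuming ES2015 Object.assign):

    const src = { a: 1, get b() { return 2; } };
    Object.assign({}, src);   // { a: 1, b: 2 } -- getter invoked, copied as data
    Object.assign({}, 'xy');  // { 0: 'x', 1: 'y' } -- non-empty strings contribute keys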
 
 // ES6 19.1.2.1 Object.assign
 BUILTIN(ObjectAssign) {
@@ -1469,7 +1564,7 @@
 
   // 1. Let to be ? ToObject(target).
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
-                                     Execution::ToObject(isolate, target));
+                                     Object::ToObject(isolate, target));
   Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
   // 2. If only one argument was passed, return to.
   if (args.length() == 2) return *to;
@@ -1478,17 +1573,20 @@
   // 4. For each element nextSource of sources, in ascending index order,
   for (int i = 2; i < args.length(); ++i) {
     Handle<Object> next_source = args.at<Object>(i);
+    Maybe<bool> fast_assign = FastAssign(to, next_source);
+    if (fast_assign.IsNothing()) return isolate->heap()->exception();
+    if (fast_assign.FromJust()) continue;
     // 4a. If nextSource is undefined or null, let keys be an empty List.
-    if (next_source->IsUndefined() || next_source->IsNull()) continue;
     // 4b. Else,
     // 4b i. Let from be ToObject(nextSource).
+    // Only non-empty strings and JSReceivers have enumerable properties.
     Handle<JSReceiver> from =
         Object::ToObject(isolate, next_source).ToHandleChecked();
     // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
     Handle<FixedArray> keys;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, keys, JSReceiver::GetKeys(from, JSReceiver::OWN_ONLY,
-                                           ALL_PROPERTIES, KEEP_NUMBERS));
+        isolate, keys,
+        JSReceiver::GetKeys(from, OWN_ONLY, ALL_PROPERTIES, KEEP_NUMBERS));
     // 4c. Repeat for each element nextKey of keys in List order,
     for (int j = 0; j < keys->length(); ++j) {
       Handle<Object> next_key(keys->get(j), isolate);
@@ -1503,7 +1601,7 @@
         Handle<Object> prop_value;
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
             isolate, prop_value,
-            Runtime::GetObjectProperty(isolate, from, next_key, STRICT));
+            Runtime::GetObjectProperty(isolate, from, next_key));
         // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
         Handle<Object> status;
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1563,6 +1661,72 @@
 }
 
 
+// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
+BUILTIN(ObjectGetOwnPropertyDescriptor) {
+  HandleScope scope(isolate);
+  // 1. Let obj be ? ToObject(O).
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  // 2. Let key be ? ToPropertyKey(P).
+  Handle<Object> property = args.atOrUndefined(isolate, 2);
+  Handle<Name> key;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                     Object::ToName(isolate, property));
+  // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+  PropertyDescriptor desc;
+  Maybe<bool> found =
+      JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
+  MAYBE_RETURN(found, isolate->heap()->exception());
+  // 4. Return FromPropertyDescriptor(desc).
+  if (!found.FromJust()) return isolate->heap()->undefined_value();
+  return *desc.ToObject(isolate);
+}
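
The Maybe<bool>-plus-out-parameter convention used here is worth spelling out: Nothing means an exception is pending (MAYBE_RETURN propagates it), Just(false) means there is no own property (the builtin returns undefined), and Just(true) means |desc| was filled in. A sketch of the same tri-state in standalone C++, with std::optional playing the role of Maybe (names illustrative):

    #include <optional>
    #include <string>

    struct Descriptor {
      std::string value;
      bool enumerable = false;
    };

    // nullopt -> "exception pending" (caller bails out immediately)
    // false   -> no own property; caller produces undefined
    // true    -> |desc| holds the property's descriptor
    std::optional<bool> GetOwnProperty(const std::string& key,
                                       Descriptor* desc) {
      if (key.empty()) return std::nullopt;  // analogue of a thrown error
      if (key == "missing") return false;
      desc->value = "some value";
      desc->enumerable = true;
      return true;
    }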
+
+
+namespace {
+
+Object* GetOwnPropertyKeys(Isolate* isolate,
+                           BuiltinArguments<BuiltinExtraArguments::kNone> args,
+                           PropertyFilter filter) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, keys,
+      JSReceiver::GetKeys(receiver, OWN_ONLY, filter, CONVERT_TO_STRING));
+  return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+}  // namespace
+
+
+// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
+BUILTIN(ObjectGetOwnPropertyNames) {
+  return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
+}
+
+
+// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
+BUILTIN(ObjectGetOwnPropertySymbols) {
+  return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
+}
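
Both builtins funnel into the single helper above; only the PropertyFilter differs. A sketch of that filtering with a hypothetical Key type (not V8's):

    #include <string>
    #include <vector>

    // getOwnPropertyNames skips symbols, getOwnPropertySymbols skips
    // string keys; one helper covers both.
    enum Filter { SKIP_SYMBOLS, SKIP_STRINGS };
    struct Key { bool is_symbol; std::string text; };

    std::vector<Key> OwnKeys(const std::vector<Key>& all, Filter filter) {
      std::vector<Key> out;
      for (const Key& k : all) {
        if (filter == SKIP_SYMBOLS && k.is_symbol) continue;
        if (filter == SKIP_STRINGS && !k.is_symbol) continue;
        out.push_back(k);
      }
      return out;
    }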
+
+
+// ES#sec-object.is Object.is ( value1, value2 )
+BUILTIN(ObjectIs) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<Object> value1 = args.at<Object>(1);
+  Handle<Object> value2 = args.at<Object>(2);
+  return isolate->heap()->ToBoolean(value1->SameValue(*value2));
+}
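
Object.is delegates to SameValue, which differs from == only at the edges: NaN is the same value as NaN, and +0 is not the same value as -0. The numeric core, as a sketch:

    #include <cmath>

    // SameValue for doubles: unlike ==, NaN matches NaN, and +0 and -0
    // are distinguished by their sign bits.
    bool SameValueNumber(double a, double b) {
      if (std::isnan(a) && std::isnan(b)) return true;
      if (a == 0.0 && b == 0.0) return std::signbit(a) == std::signbit(b);
      return a == b;
    }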
+
+
 // ES6 section 19.1.2.11 Object.isExtensible ( O )
 BUILTIN(ObjectIsExtensible) {
   HandleScope scope(isolate);
@@ -1608,9 +1772,9 @@
   Handle<Object> object = args.atOrUndefined(isolate, 1);
   Handle<JSReceiver> receiver;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
-                                     Execution::ToObject(isolate, object));
-  Handle<FixedArray> keys;
+                                     Object::ToObject(isolate, object));
 
+  Handle<FixedArray> keys;
   int enum_length = receiver->map()->EnumLength();
   if (enum_length != kInvalidEnumCacheSentinel &&
       JSObject::cast(*receiver)->elements() ==
@@ -1618,29 +1782,87 @@
     DCHECK(receiver->IsJSObject());
     DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
     DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
-    DCHECK(!HeapObject::cast(receiver->map()->prototype())
-                ->map()
-                ->is_hidden_prototype());
+    DCHECK(!receiver->map()->has_hidden_prototype());
     DCHECK(JSObject::cast(*receiver)->HasFastProperties());
     if (enum_length == 0) {
       keys = isolate->factory()->empty_fixed_array();
     } else {
       Handle<FixedArray> cache(
           receiver->map()->instance_descriptors()->GetEnumCache());
-      keys = isolate->factory()->NewFixedArray(enum_length);
-      for (int i = 0; i < enum_length; i++) {
-        keys->set(i, cache->get(i));
-      }
+      keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
     }
   } else {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, keys,
-        JSReceiver::GetKeys(receiver, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS,
+        JSReceiver::GetKeys(receiver, OWN_ONLY, ENUMERABLE_STRINGS,
                             CONVERT_TO_STRING));
   }
   return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
 }
 
+BUILTIN(ObjectValues) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> values;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
+  return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+
+BUILTIN(ObjectEntries) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  Handle<FixedArray> entries;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, entries,
+      JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
+  return *isolate->factory()->NewJSArrayWithElements(entries);
+}
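
Both builtins walk the same enumerable own string-keyed properties; values() keeps only the value column while entries() keeps the [key, value] pairs. As a sketch over an insertion-ordered store (illustrative types only):

    #include <string>
    #include <utility>
    #include <vector>

    using Props = std::vector<std::pair<std::string, std::string>>;

    // GetOwnValues analogue: project out the values, in property order.
    std::vector<std::string> OwnValues(const Props& props) {
      std::vector<std::string> out;
      for (const auto& p : props) out.push_back(p.second);
      return out;
    }

    // GetOwnEntries analogue: the store is already [key, value] pairs.
    Props OwnEntries(const Props& props) { return props; }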
+
+BUILTIN(ObjectGetOwnPropertyDescriptors) {
+  HandleScope scope(isolate);
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+  Handle<Object> undefined = isolate->factory()->undefined_value();
+
+  Handle<JSReceiver> receiver;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
+                                         CONVERT_TO_STRING));
+
+  Handle<Object> descriptors =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  for (int i = 0; i < keys->length(); ++i) {
+    Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
+    PropertyDescriptor descriptor;
+    Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+        isolate, receiver, key, &descriptor);
+    MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
+
+    Handle<Object> from_descriptor = did_get_descriptor.FromJust()
+                                         ? descriptor.ToObject(isolate)
+                                         : undefined;
+
+    LookupIterator it = LookupIterator::PropertyOrElement(
+        isolate, descriptors, key, LookupIterator::OWN);
+    Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
+                                                         Object::DONT_THROW);
+    CHECK(success.FromJust());
+  }
+
+  return *descriptors;
+}
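
The loop above composes three pieces already shown: own keys, per-key GetOwnPropertyDescriptor, and CreateDataProperty (which defines rather than sets, so no setter on the result object can run). A sketch of the composition with plain C++ stand-ins; the std::nullopt case corresponds to the undefined branch, which a proxy can trigger by reporting a key and then declining the descriptor:

    #include <map>
    #include <optional>
    #include <string>

    struct Desc { std::string value; bool enumerable; };
    using Obj = std::map<std::string, Desc>;

    // For each own key, record its descriptor (or nullopt if the object
    // declined to produce one) as a plain data property of the result.
    std::map<std::string, std::optional<Desc>> OwnPropertyDescriptors(
        const Obj& o) {
      std::map<std::string, std::optional<Desc>> descriptors;
      for (const auto& [key, desc] : o) {
        descriptors[key] = desc;
      }
      return descriptors;
    }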
 
 // ES6 section 19.1.2.15 Object.preventExtensions ( O )
 BUILTIN(ObjectPreventExtensions) {
@@ -1719,7 +1941,7 @@
 BUILTIN(GlobalEval) {
   HandleScope scope(isolate);
   Handle<Object> x = args.atOrUndefined(isolate, 1);
-  Handle<JSFunction> target = args.target();
+  Handle<JSFunction> target = args.target<JSFunction>();
   Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
   if (!x->IsString()) return *x;
   Handle<JSFunction> function;
@@ -1813,7 +2035,7 @@
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result, Object::GetPropertyOrElement(
-          Handle<JSReceiver>::cast(target), name, receiver));
+                           receiver, name, Handle<JSReceiver>::cast(target)));
 
   return *result;
 }
@@ -1859,8 +2081,9 @@
                                   "Reflect.getPrototypeOf")));
   }
   Handle<Object> prototype;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
-                                     Object::GetPrototype(isolate, target));
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
   return *prototype;
 }
 
@@ -1925,9 +2148,9 @@
 
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, keys, JSReceiver::GetKeys(Handle<JSReceiver>::cast(target),
-                                         JSReceiver::OWN_ONLY, ALL_PROPERTIES,
-                                         CONVERT_TO_STRING));
+      isolate, keys,
+      JSReceiver::GetKeys(Handle<JSReceiver>::cast(target), OWN_ONLY,
+                          ALL_PROPERTIES, CONVERT_TO_STRING));
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
 
@@ -2007,6 +2230,180 @@
 
 
 // -----------------------------------------------------------------------------
+// ES6 section 19.3 Boolean Objects
+
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
+BUILTIN(BooleanConstructor) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  return isolate->heap()->ToBoolean(value->BooleanValue());
+}
+
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
+BUILTIN(BooleanConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  DCHECK(*target == target->native_context()->boolean_function());
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
+  Handle<JSValue>::cast(result)->set_value(
+      isolate->heap()->ToBoolean(value->BooleanValue()));
+  return *result;
+}
+
+
+// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
+BUILTIN(BooleanPrototypeToString) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (receiver->IsJSValue()) {
+    receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
+  }
+  if (!receiver->IsBoolean()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Boolean.prototype.toString")));
+  }
+  return Handle<Oddball>::cast(receiver)->to_string();
+}
+
+
+// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
+BUILTIN(BooleanPrototypeValueOf) {
+  HandleScope scope(isolate);
+  Handle<Object> receiver = args.receiver();
+  if (receiver->IsJSValue()) {
+    receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
+  }
+  if (!receiver->IsBoolean()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kNotGeneric,
+                              isolate->factory()->NewStringFromAsciiChecked(
+                                  "Boolean.prototype.valueOf")));
+  }
+  return *receiver;
+}
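
toString and valueOf share the same receiver handling: unwrap a Boolean wrapper object if present, then require a boolean primitive, otherwise throw kNotGeneric. A sketch with std::variant standing in for the receiver (illustrative only):

    #include <stdexcept>
    #include <variant>

    struct BooleanWrapper { bool value; };  // JSValue analogue
    using Receiver = std::variant<bool, BooleanWrapper, std::nullptr_t>;

    bool ThisBooleanValue(Receiver receiver) {
      if (auto* w = std::get_if<BooleanWrapper>(&receiver))
        return w->value;                          // unwrap the wrapper
      if (auto* b = std::get_if<bool>(&receiver)) return *b;
      throw std::invalid_argument("not generic");  // kNotGeneric analogue
    }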
+
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.2 DataView Objects
+
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
+BUILTIN(DataViewConstructor) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError(MessageTemplate::kConstructorNotFunction,
+                   isolate->factory()->NewStringFromAsciiChecked("DataView")));
+}
+
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
+BUILTIN(DataViewConstructor_ConstructStub) {
+  HandleScope scope(isolate);
+  Handle<JSFunction> target = args.target<JSFunction>();
+  Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+  Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+  Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+  Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+  // 2. If Type(buffer) is not Object, throw a TypeError exception.
+  // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+  //    TypeError exception.
+  if (!buffer->IsJSArrayBuffer()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+  }
+  Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
+
+  // 4. Let numberOffset be ? ToNumber(byteOffset).
+  Handle<Object> number_offset;
+  if (byte_offset->IsUndefined()) {
+    // We intentionally violate the specification at this point so that
+    // new DataView(buffer) invocations are equivalent to the full
+    // new DataView(buffer, 0) invocation.
+    number_offset = handle(Smi::FromInt(0), isolate);
+  } else {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_offset,
+                                       Object::ToNumber(byte_offset));
+  }
+
+  // 5. Let offset be ToInteger(numberOffset).
+  Handle<Object> offset;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, offset,
+                                     Object::ToInteger(isolate, number_offset));
+
+  // 6. If numberOffset ≠ offset or offset < 0, throw a RangeError exception.
+  if (number_offset->Number() != offset->Number() || offset->Number() < 0.0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
+  }
+
+  // 7. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+  // We currently violate the specification at this point.
+
+  // 8. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
+  // internal slot.
+  double const buffer_byte_length = array_buffer->byte_length()->Number();
+
+  // 9. If offset > bufferByteLength, throw a RangeError exception.
+  if (offset->Number() > buffer_byte_length) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
+  }
+
+  Handle<Object> view_byte_length;
+  if (byte_length->IsUndefined()) {
+    // 10. If byteLength is undefined, then
+    //       a. Let viewByteLength be bufferByteLength - offset.
+    view_byte_length =
+        isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+  } else {
+    // 11. Else,
+    //       a. Let viewByteLength be ? ToLength(byteLength).
+    //       b. If offset + viewByteLength > bufferByteLength, throw a
+    //          RangeError exception.
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, view_byte_length, Object::ToLength(isolate, byte_length));
+    if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+    }
+  }
+
+  // 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+  //     "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+  //     [[ByteLength]], [[ByteOffset]]»).
+  // 13. Set O's [[DataView]] internal slot to true.
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
+  for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
+    Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+  }
+
+  // 14. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+  Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+
+  // 15. Set O's [[ByteLength]] internal slot to viewByteLength.
+  Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+
+  // 16. Set O's [[ByteOffset]] internal slot to offset.
+  Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+
+  // 17. Return O.
+  return *result;
+}
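
The numbered steps reduce to one validation rule: the offset must be a non-negative integer within the buffer, and the view must fit in what remains. A standalone sketch of steps 4-11 (std::trunc approximates ToInteger/ToLength; a NaN offset fails the numberOffset != offset comparison here, just as the builtin's comparison rejects it):

    #include <cmath>
    #include <stdexcept>

    // Returns the view's byte length, or throws on the same conditions
    // that raise RangeError in the builtin above.
    double ViewByteLength(double buffer_byte_length, double number_offset,
                          bool length_is_undefined, double number_length) {
      double offset = std::trunc(number_offset);  // roughly ToInteger
      if (number_offset != offset || offset < 0.0)
        throw std::range_error("invalid offset");
      if (offset > buffer_byte_length)
        throw std::range_error("offset past end of buffer");
      if (length_is_undefined) return buffer_byte_length - offset;
      double view_len = std::trunc(number_length);
      if (!(view_len > 0.0)) view_len = 0.0;  // ToLength: NaN/negatives -> 0
      if (offset + view_len > buffer_byte_length)
        throw std::range_error("invalid view length");
      return view_len;
    }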
+
+
+// -----------------------------------------------------------------------------
 // ES6 section 20.3 Date Objects
 
 
@@ -2207,7 +2604,11 @@
   char buffer[128];
   Vector<char> str(buffer, arraysize(buffer));
   ToDateString(time_val, str, isolate->date_cache());
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  Handle<String> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+  return *result;
 }
 
 
@@ -2215,7 +2616,7 @@
 BUILTIN(DateConstructor_ConstructStub) {
   HandleScope scope(isolate);
   int const argc = args.length() - 1;
-  Handle<JSFunction> target = args.target();
+  Handle<JSFunction> target = args.target<JSFunction>();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   double time_val;
   if (argc == 0) {
@@ -2787,7 +3188,11 @@
   char buffer[128];
   Vector<char> str(buffer, arraysize(buffer));
   ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  Handle<String> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+  return *result;
 }
 
 
@@ -2827,7 +3232,11 @@
   char buffer[128];
   Vector<char> str(buffer, arraysize(buffer));
   ToDateString(date->value()->Number(), str, isolate->date_cache());
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  Handle<String> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+  return *result;
 }
 
 
@@ -2838,7 +3247,11 @@
   char buffer[128];
   Vector<char> str(buffer, arraysize(buffer));
   ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
-  return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+  Handle<String> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+  return *result;
 }
 
 
@@ -3099,7 +3512,7 @@
 
   // Compile the string in the constructor and not in a helper so that errors
   // come from here.
-  Handle<JSFunction> target = args.target();
+  Handle<JSFunction> target = args.target<JSFunction>();
   Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
   Handle<JSFunction> function;
   {
@@ -3246,6 +3659,45 @@
   return *result;
 }
 
+// ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
+BUILTIN(FunctionHasInstance) {
+  HandleScope scope(isolate);
+  Handle<Object> callable = args.receiver();
+  Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+  // {callable} must have a [[Call]] internal method.
+  if (!callable->IsCallable()) {
+    return isolate->heap()->false_value();
+  }
+  // If {object} is not a receiver, return false.
+  if (!object->IsJSReceiver()) {
+    return isolate->heap()->false_value();
+  }
+  // Check if {callable} is bound; if so, get its [[BoundTargetFunction]]
+  // and use that instead of {callable}.
+  while (callable->IsJSBoundFunction()) {
+    callable =
+        handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+               isolate);
+  }
+  DCHECK(callable->IsCallable());
+  // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+  Handle<Object> prototype;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, prototype,
+      Object::GetProperty(callable, isolate->factory()->prototype_string()));
+  if (!prototype->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
+  }
+  // Return whether or not {prototype} is in the prototype chain of {object}.
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+  Maybe<bool> result =
+      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
+  MAYBE_RETURN(result, isolate->heap()->exception());
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
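
Once bound functions are unwrapped, the instanceof question is just a chain walk: is callable's "prototype" anywhere on the receiver's prototype chain? A sketch with hypothetical nodes:

    // Hypothetical object node; proto == nullptr terminates the chain.
    struct Obj { const Obj* proto = nullptr; };

    bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
      for (const Obj* p = object->proto; p != nullptr; p = p->proto) {
        if (p == prototype) return true;
      }
      return false;
    }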
 
 // ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
 BUILTIN(SymbolConstructor) {
@@ -3284,7 +3736,7 @@
 // ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
 BUILTIN(ArrayBufferConstructor) {
   HandleScope scope(isolate);
-  Handle<JSFunction> target = args.target();
+  Handle<JSFunction> target = args.target<JSFunction>();
   DCHECK(*target == target->native_context()->array_buffer_fun() ||
          *target == target->native_context()->shared_array_buffer_fun());
   THROW_NEW_ERROR_RETURN_FAILURE(
@@ -3296,7 +3748,7 @@
 // ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
 BUILTIN(ArrayBufferConstructor_ConstructStub) {
   HandleScope scope(isolate);
-  Handle<JSFunction> target = args.target();
+  Handle<JSFunction> target = args.target<JSFunction>();
   Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
   Handle<Object> length = args.atOrUndefined(isolate, 1);
   DCHECK(*target == target->native_context()->array_buffer_fun() ||
@@ -3308,22 +3760,20 @@
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
   }
-  Handle<Map> initial_map;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, initial_map,
-      JSFunction::GetDerivedMap(isolate, target, new_target));
+  Handle<JSObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::New(target, new_target));
   size_t byte_length;
   if (!TryNumberToSize(isolate, *number_length, &byte_length)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
   }
-  Handle<JSArrayBuffer> result = Handle<JSArrayBuffer>::cast(
-      isolate->factory()->NewJSObjectFromMap(initial_map));
   SharedFlag shared_flag =
       (*target == target->native_context()->array_buffer_fun())
           ? SharedFlag::kNotShared
           : SharedFlag::kShared;
-  if (!JSArrayBuffer::SetupAllocatingData(result, isolate, byte_length, true,
+  if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
+                                          isolate, byte_length, true,
                                           shared_flag)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
@@ -3393,23 +3843,37 @@
 MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
     Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
   HandleScope scope(isolate);
-  Handle<JSFunction> function = args.target();
-  DCHECK(args.receiver()->IsJSReceiver());
+  Handle<HeapObject> function = args.target<HeapObject>();
+  Handle<JSReceiver> receiver;
   // TODO(ishell): turn this back to a DCHECK.
-  CHECK(function->shared()->IsApiFunction());
+  CHECK(function->IsFunctionTemplateInfo() ||
+        Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
 
-  Handle<FunctionTemplateInfo> fun_data(
-      function->shared()->get_api_func_data(), isolate);
+  Handle<FunctionTemplateInfo> fun_data =
+      function->IsFunctionTemplateInfo()
+          ? Handle<FunctionTemplateInfo>::cast(function)
+          : handle(JSFunction::cast(*function)->shared()->get_api_func_data());
   if (is_construct) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, fun_data,
-        ApiNatives::ConfigureInstance(isolate, fun_data,
-                                      Handle<JSObject>::cast(args.receiver())),
-        Object);
+    DCHECK(args.receiver()->IsTheHole());
+    if (fun_data->instance_template()->IsUndefined()) {
+      v8::Local<ObjectTemplate> templ =
+          ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
+                              ToApiHandle<v8::FunctionTemplate>(fun_data));
+      fun_data->set_instance_template(*Utils::OpenHandle(*templ));
+    }
+    Handle<ObjectTemplateInfo> instance_template(
+        ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                               ApiNatives::InstantiateObject(instance_template),
+                               Object);
+    args[0] = *receiver;
+    DCHECK_EQ(*receiver, *args.receiver());
+  } else {
+    DCHECK(args.receiver()->IsJSReceiver());
+    receiver = args.at<JSReceiver>(0);
   }
 
   if (!is_construct && !fun_data->accept_any_receiver()) {
-    Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
     if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
       Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
       if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
@@ -3419,7 +3883,7 @@
     }
   }
 
-  Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, args[0]);
+  Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, *receiver);
 
   if (raw_holder->IsNull()) {
     // This function cannot be called with the given receiver.  Abort!
@@ -3463,7 +3927,7 @@
     }
   }
 
-  return scope.CloseAndEscape(args.receiver());
+  return scope.CloseAndEscape(receiver);
 }
 
 }  // namespace
@@ -3486,34 +3950,83 @@
   return *result;
 }
 
-
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
-  switch (mode) {
-    case ConvertReceiverMode::kNullOrUndefined:
-      return CallFunction_ReceiverIsNullOrUndefined();
-    case ConvertReceiverMode::kNotNullOrUndefined:
-      return CallFunction_ReceiverIsNotNullOrUndefined();
-    case ConvertReceiverMode::kAny:
-      return CallFunction_ReceiverIsAny();
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
+                                    TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return CallFunction_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return CallFunction_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return CallFunction_ReceiverIsAny();
+      }
+      break;
+    case TailCallMode::kAllow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return TailCallFunction_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return TailCallFunction_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return TailCallFunction_ReceiverIsAny();
+      }
+      break;
   }
   UNREACHABLE();
   return Handle<Code>::null();
 }
 
-
-Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
-  switch (mode) {
-    case ConvertReceiverMode::kNullOrUndefined:
-      return Call_ReceiverIsNullOrUndefined();
-    case ConvertReceiverMode::kNotNullOrUndefined:
-      return Call_ReceiverIsNotNullOrUndefined();
-    case ConvertReceiverMode::kAny:
-      return Call_ReceiverIsAny();
+Handle<Code> Builtins::Call(ConvertReceiverMode mode,
+                            TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return Call_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return Call_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return Call_ReceiverIsAny();
+      }
+      break;
+    case TailCallMode::kAllow:
+      switch (mode) {
+        case ConvertReceiverMode::kNullOrUndefined:
+          return TailCall_ReceiverIsNullOrUndefined();
+        case ConvertReceiverMode::kNotNullOrUndefined:
+          return TailCall_ReceiverIsNotNullOrUndefined();
+        case ConvertReceiverMode::kAny:
+          return TailCall_ReceiverIsAny();
+      }
+      break;
   }
   UNREACHABLE();
   return Handle<Code>::null();
 }
 
+Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      return CallBoundFunction();
+    case TailCallMode::kAllow:
+      return TailCallBoundFunction();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      return InterpreterPushArgsAndCall();
+    case TailCallMode::kAllow:
+      return InterpreterPushArgsAndTailCall();
+  }
+  UNREACHABLE();
+  return Handle<Code>::null();
+}
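
The nested switches in CallFunction and Call enumerate a full (ConvertReceiverMode x TailCallMode) matrix; every combination maps to exactly one builtin. The same dispatch written as a table, for comparison (a sketch; the strings mirror the builtin names above):

    enum class Mode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum class Tail { kDisallow, kAllow };

    // 2x3 lookup replacing the nested switches; rows are TailCallMode,
    // columns are ConvertReceiverMode.
    const char* CallStubName(Mode mode, Tail tail) {
      static const char* kNames[2][3] = {
          {"Call_ReceiverIsNullOrUndefined",
           "Call_ReceiverIsNotNullOrUndefined", "Call_ReceiverIsAny"},
          {"TailCall_ReceiverIsNullOrUndefined",
           "TailCall_ReceiverIsNotNullOrUndefined",
           "TailCall_ReceiverIsAny"}};
      return kNames[static_cast<int>(tail)][static_cast<int>(mode)];
    }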
 
 namespace {
 
@@ -3536,8 +4049,7 @@
 
 }  // namespace
 
-
-MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<JSFunction> function,
+MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
                                                 Handle<Object> receiver,
                                                 int argc,
                                                 Handle<Object> args[]) {
@@ -3644,12 +4156,7 @@
 
 
 static void Generate_LoadIC_Normal(MacroAssembler* masm) {
-  LoadIC::GenerateNormal(masm, SLOPPY);
-}
-
-
-static void Generate_LoadIC_Normal_Strong(MacroAssembler* masm) {
-  LoadIC::GenerateNormal(masm, STRONG);
+  LoadIC::GenerateNormal(masm);
 }
 
 
@@ -3659,22 +4166,12 @@
 
 
 static void Generate_LoadIC_Slow(MacroAssembler* masm) {
-  LoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
-}
-
-
-static void Generate_LoadIC_Slow_Strong(MacroAssembler* masm) {
-  LoadIC::GenerateRuntimeGetProperty(masm, STRONG);
+  LoadIC::GenerateRuntimeGetProperty(masm);
 }
 
 
 static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedLoadIC_Slow_Strong(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateRuntimeGetProperty(masm, STRONG);
+  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -3684,12 +4181,7 @@
 
 
 static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMegamorphic(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedLoadIC_Megamorphic_Strong(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMegamorphic(masm, STRONG);
+  KeyedLoadIC::GenerateMegamorphic(masm);
 }
 
 
diff --git a/src/builtins.h b/src/builtins.h
index a707a94..93e6e3d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -69,6 +69,14 @@
   V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
   V(ArrayBufferIsView, kNone)                                  \
                                                                \
+  V(BooleanConstructor, kNone)                                 \
+  V(BooleanConstructor_ConstructStub, kTargetAndNewTarget)     \
+  V(BooleanPrototypeToString, kNone)                           \
+  V(BooleanPrototypeValueOf, kNone)                            \
+                                                               \
+  V(DataViewConstructor, kNone)                                \
+  V(DataViewConstructor_ConstructStub, kTargetAndNewTarget)    \
+                                                               \
   V(DateConstructor, kNone)                                    \
   V(DateConstructor_ConstructStub, kTargetAndNewTarget)        \
   V(DateNow, kNone)                                            \
@@ -102,6 +110,7 @@
   V(FunctionConstructor, kTargetAndNewTarget)                  \
   V(FunctionPrototypeBind, kNone)                              \
   V(FunctionPrototypeToString, kNone)                          \
+  V(FunctionHasInstance, kNone)                                \
                                                                \
   V(GeneratorFunctionConstructor, kTargetAndNewTarget)         \
                                                                \
@@ -110,10 +119,17 @@
   V(ObjectAssign, kNone)                                       \
   V(ObjectCreate, kNone)                                       \
   V(ObjectFreeze, kNone)                                       \
+  V(ObjectGetOwnPropertyDescriptor, kNone)                     \
+  V(ObjectGetOwnPropertyNames, kNone)                          \
+  V(ObjectGetOwnPropertySymbols, kNone)                        \
+  V(ObjectIs, kNone)                                           \
   V(ObjectIsExtensible, kNone)                                 \
   V(ObjectIsFrozen, kNone)                                     \
   V(ObjectIsSealed, kNone)                                     \
   V(ObjectKeys, kNone)                                         \
+  V(ObjectValues, kNone)                                       \
+  V(ObjectEntries, kNone)                                      \
+  V(ObjectGetOwnPropertyDescriptors, kNone)                    \
   V(ObjectPreventExtensions, kNone)                            \
   V(ObjectSeal, kNone)                                         \
   V(ObjectProtoToString, kNone)                                \
@@ -155,11 +171,22 @@
   V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,         \
     kNoExtraICState)                                                           \
   V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(TailCallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED,        \
+    kNoExtraICState)                                                           \
+  V(TailCallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,     \
+    kNoExtraICState)                                                           \
+  V(TailCallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
   V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(TailCallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
   V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
   V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,                 \
     kNoExtraICState)                                                           \
   V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+  V(TailCall_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED,                \
+    kNoExtraICState)                                                           \
+  V(TailCall_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED,             \
+    kNoExtraICState)                                                           \
+  V(TailCall_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
                                                                                \
   V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
   V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
@@ -173,6 +200,8 @@
   V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
   V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
   V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
+  V(JSBuiltinsConstructStubForDerived, BUILTIN, UNINITIALIZED,                 \
+    kNoExtraICState)                                                           \
   V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
   V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
   V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
@@ -188,10 +217,12 @@
   V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
   V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)        \
   V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(InterpreterPushArgsAndTailCall, BUILTIN, UNINITIALIZED, kNoExtraICState)   \
   V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
   V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
   V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
   V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+  V(InterpreterEnterBytecodeDispatch, BUILTIN, UNINITIALIZED, kNoExtraICState) \
                                                                                \
   V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
   V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
@@ -200,9 +231,6 @@
   V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState)             \
   V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)      \
                                                                                \
-  V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC,                \
-    LoadICState::kStrongModeState)                                             \
-                                                                               \
   V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC,                            \
     StoreICState::kStrictModeState)                                            \
                                                                                \
@@ -246,6 +274,9 @@
   V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
   V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                        \
                                                                                \
+  V(MathMax, BUILTIN, UNINITIALIZED, kNoExtraICState)                          \
+  V(MathMin, BUILTIN, UNINITIALIZED, kNoExtraICState)                          \
+                                                                               \
   V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
   V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState)  \
                                                                                \
@@ -265,13 +296,10 @@
 // Define list of builtin handlers implemented in assembly.
 #define BUILTIN_LIST_H(V)                    \
   V(LoadIC_Slow,             LOAD_IC)        \
-  V(LoadIC_Slow_Strong,      LOAD_IC)        \
   V(KeyedLoadIC_Slow,        KEYED_LOAD_IC)  \
-  V(KeyedLoadIC_Slow_Strong, KEYED_LOAD_IC)  \
   V(StoreIC_Slow,            STORE_IC)       \
   V(KeyedStoreIC_Slow,       KEYED_STORE_IC) \
   V(LoadIC_Normal,           LOAD_IC)        \
-  V(LoadIC_Normal_Strong,    LOAD_IC)        \
   V(StoreIC_Normal,          STORE_IC)
 
 // Define list of builtins used by the debugger implemented in assembly.
@@ -332,8 +360,13 @@
 #undef DECLARE_BUILTIN_ACCESSOR_A
 
   // Convenience wrappers.
-  Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
-  Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+  Handle<Code> CallFunction(
+      ConvertReceiverMode = ConvertReceiverMode::kAny,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
+                    TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+  Handle<Code> InterpreterPushArgsAndCall(TailCallMode tail_call_mode);
 
   Code* builtin(Name name) {
     // Code::cast cannot be used here since we access builtins
@@ -358,7 +391,7 @@
   bool is_initialized() const { return initialized_; }
 
   MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
-      Handle<JSFunction> function, Handle<Object> receiver, int argc,
+      Handle<HeapObject> function, Handle<Object> receiver, int argc,
       Handle<Object> args[]);
 
  private:
@@ -383,6 +416,7 @@
   static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSBuiltinsConstructStub(MacroAssembler* masm);
+  static void Generate_JSBuiltinsConstructStubForDerived(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
   static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
@@ -397,30 +431,71 @@
 
   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
   static void Generate_CallFunction(MacroAssembler* masm,
-                                    ConvertReceiverMode mode);
+                                    ConvertReceiverMode mode,
+                                    TailCallMode tail_call_mode);
   static void Generate_CallFunction_ReceiverIsNullOrUndefined(
       MacroAssembler* masm) {
-    Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
+    Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+                          TailCallMode::kDisallow);
   }
   static void Generate_CallFunction_ReceiverIsNotNullOrUndefined(
       MacroAssembler* masm) {
-    Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
+    Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                          TailCallMode::kDisallow);
   }
   static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
-    Generate_CallFunction(masm, ConvertReceiverMode::kAny);
+    Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+                          TailCallMode::kDisallow);
+  }
+  static void Generate_TailCallFunction_ReceiverIsNullOrUndefined(
+      MacroAssembler* masm) {
+    Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+                          TailCallMode::kAllow);
+  }
+  static void Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
+      MacroAssembler* masm) {
+    Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                          TailCallMode::kAllow);
+  }
+  static void Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
+    Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+                          TailCallMode::kAllow);
   }
   // ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList)
-  static void Generate_CallBoundFunction(MacroAssembler* masm);
+  static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                             TailCallMode tail_call_mode);
+  static void Generate_CallBoundFunction(MacroAssembler* masm) {
+    Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+  }
+  static void Generate_TailCallBoundFunction(MacroAssembler* masm) {
+    Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+  }
   // ES6 section 7.3.12 Call(F, V, [argumentsList])
-  static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
+  static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                            TailCallMode tail_call_mode);
   static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
-    Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
+    Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+                  TailCallMode::kDisallow);
   }
   static void Generate_Call_ReceiverIsNotNullOrUndefined(MacroAssembler* masm) {
-    Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
+    Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                  TailCallMode::kDisallow);
   }
   static void Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
-    Generate_Call(masm, ConvertReceiverMode::kAny);
+    Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+  }
+  static void Generate_TailCall_ReceiverIsNullOrUndefined(
+      MacroAssembler* masm) {
+    Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+                  TailCallMode::kAllow);
+  }
+  static void Generate_TailCall_ReceiverIsNotNullOrUndefined(
+      MacroAssembler* masm) {
+    Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+                  TailCallMode::kAllow);
+  }
+  static void Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
+    Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
   }
 
   // ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
@@ -482,6 +557,17 @@
   static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
 
+  enum class MathMaxMinKind { kMax, kMin };
+  static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
+  // ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
+  static void Generate_MathMax(MacroAssembler* masm) {
+    Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
+  }
+  // ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+  static void Generate_MathMin(MacroAssembler* masm) {
+    Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
+  }
+
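
The MathMaxMin generator must preserve two spec quirks that a naive fmax-style loop gets wrong: any NaN argument poisons the result, and -0 is ordered below +0 (so Math.max(+0, -0) is +0), with -Infinity as the empty-argument identity. A sketch of the required semantics:

    #include <cmath>
    #include <initializer_list>
    #include <limits>

    // Math.max semantics: NaN wins outright; -0 loses to +0 at equality.
    double MathMax(std::initializer_list<double> values) {
      double result = -std::numeric_limits<double>::infinity();
      for (double v : values) {
        if (std::isnan(v)) return std::numeric_limits<double>::quiet_NaN();
        if (v > result || (v == result && !std::signbit(v))) result = v;
      }
      return result;
    }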
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
   static void Generate_NumberConstructor(MacroAssembler* masm);
   // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
@@ -496,11 +582,20 @@
 
   static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
   static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
-  static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm);
+  static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+    return Generate_InterpreterPushArgsAndCallImpl(masm,
+                                                   TailCallMode::kDisallow);
+  }
+  static void Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
+    return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow);
+  }
+  static void Generate_InterpreterPushArgsAndCallImpl(
+      MacroAssembler* masm, TailCallMode tail_call_mode);
   static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
   static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
   static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
+  static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
 
 #define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C)                \
   static void Generate_Make##C##CodeYoungAgainEvenMarking(   \
diff --git a/src/code-factory.cc b/src/code-factory.cc
index 6d31a5f..9898282 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -12,44 +12,36 @@
 
 
 // static
-Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode,
-                             LanguageMode language_mode) {
-  return Callable(
-      LoadIC::initialize_stub(
-          isolate, LoadICState(typeof_mode, language_mode).GetExtraICState()),
-      LoadDescriptor(isolate));
+Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
+  return Callable(LoadIC::initialize_stub(
+                      isolate, LoadICState(typeof_mode).GetExtraICState()),
+                  LoadDescriptor(isolate));
 }
 
 
 // static
 Callable CodeFactory::LoadICInOptimizedCode(
-    Isolate* isolate, TypeofMode typeof_mode, LanguageMode language_mode,
+    Isolate* isolate, TypeofMode typeof_mode,
     InlineCacheState initialization_state) {
   auto code = LoadIC::initialize_stub_in_optimized_code(
-      isolate, LoadICState(typeof_mode, language_mode).GetExtraICState(),
+      isolate, LoadICState(typeof_mode).GetExtraICState(),
       initialization_state);
   return Callable(code, LoadWithVectorDescriptor(isolate));
 }
 
 
 // static
-Callable CodeFactory::KeyedLoadIC(Isolate* isolate,
-                                  LanguageMode language_mode) {
-  ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
-                                                : kNoExtraICState;
-  return Callable(KeyedLoadIC::initialize_stub(isolate, state),
+Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
+  return Callable(KeyedLoadIC::initialize_stub(isolate, kNoExtraICState),
                   LoadDescriptor(isolate));
 }
 
 
 // static
 Callable CodeFactory::KeyedLoadICInOptimizedCode(
-    Isolate* isolate, LanguageMode language_mode,
-    InlineCacheState initialization_state) {
-  ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
-                                                : kNoExtraICState;
+    Isolate* isolate, InlineCacheState initialization_state) {
   auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
-      isolate, initialization_state, state);
+      isolate, initialization_state, kNoExtraICState);
   if (initialization_state != MEGAMORPHIC) {
     return Callable(code, LoadWithVectorDescriptor(isolate));
   }
@@ -59,18 +51,20 @@
 
 // static
 Callable CodeFactory::CallIC(Isolate* isolate, int argc,
-                             ConvertReceiverMode mode) {
-  return Callable(CallIC::initialize_stub(isolate, argc, mode),
+                             ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
+  return Callable(CallIC::initialize_stub(isolate, argc, mode, tail_call_mode),
                   CallFunctionWithFeedbackDescriptor(isolate));
 }
 
 
 // static
 Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
-                                            ConvertReceiverMode mode) {
-  return Callable(
-      CallIC::initialize_stub_in_optimized_code(isolate, argc, mode),
-      CallFunctionWithFeedbackAndVectorDescriptor(isolate));
+                                            ConvertReceiverMode mode,
+                                            TailCallMode tail_call_mode) {
+  return Callable(CallIC::initialize_stub_in_optimized_code(isolate, argc, mode,
+                                                            tail_call_mode),
+                  CallFunctionWithFeedbackAndVectorDescriptor(isolate));
 }
 
 
@@ -118,9 +112,8 @@
 
 
 // static
-Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op,
-                                Strength strength) {
-  Handle<Code> code = CompareIC::GetUninitialized(isolate, op, strength);
+Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
+  Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
   return Callable(code, CompareDescriptor(isolate));
 }
 
@@ -133,9 +126,8 @@
 
 
 // static
-Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
-                                 Strength strength) {
-  BinaryOpICStub stub(isolate, op, strength);
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
+  BinaryOpICStub stub(isolate, op);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
@@ -169,6 +161,13 @@
 
 
 // static
+Callable CodeFactory::ToName(Isolate* isolate) {
+  ToNameStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
 Callable CodeFactory::ToLength(Isolate* isolate) {
   ToLengthStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -271,19 +270,29 @@
 
 
 // static
-Callable CodeFactory::ArgumentsAccess(Isolate* isolate,
-                                      bool is_unmapped_arguments,
-                                      bool has_duplicate_parameters) {
-  ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-      is_unmapped_arguments, has_duplicate_parameters);
-  ArgumentsAccessStub stub(isolate, type);
+Callable CodeFactory::FastNewObject(Isolate* isolate) {
+  FastNewObjectStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
 
 // static
-Callable CodeFactory::RestArgumentsAccess(Isolate* isolate) {
-  RestParamAccessStub stub(isolate);
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
+  FastNewRestParameterStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
+  FastNewSloppyArgumentsStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
+  FastNewStrictArgumentsStub stub(isolate);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
 
@@ -345,9 +354,11 @@
 
 
 // static
-Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate) {
-  return Callable(isolate->builtins()->InterpreterPushArgsAndCall(),
-                  InterpreterPushArgsAndCallDescriptor(isolate));
+Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
+                                                 TailCallMode tail_call_mode) {
+  return Callable(
+      isolate->builtins()->InterpreterPushArgsAndCall(tail_call_mode),
+      InterpreterPushArgsAndCallDescriptor(isolate));
 }
 
 
diff --git a/src/code-factory.h b/src/code-factory.h
index 2126790..fb1a165 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -32,21 +32,20 @@
 class CodeFactory final {
  public:
   // Initial states for ICs.
-  static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode,
-                         LanguageMode language_mode);
+  static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode);
   static Callable LoadICInOptimizedCode(Isolate* isolate,
                                         TypeofMode typeof_mode,
-                                        LanguageMode language_mode,
                                         InlineCacheState initialization_state);
-  static Callable KeyedLoadIC(Isolate* isolate, LanguageMode language_mode);
+  static Callable KeyedLoadIC(Isolate* isolate);
   static Callable KeyedLoadICInOptimizedCode(
-      Isolate* isolate, LanguageMode language_mode,
-      InlineCacheState initialization_state);
+      Isolate* isolate, InlineCacheState initialization_state);
   static Callable CallIC(Isolate* isolate, int argc,
-                         ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+                         ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+                         TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable CallICInOptimizedCode(
       Isolate* isolate, int argc,
-      ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+      ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable StoreIC(Isolate* isolate, LanguageMode mode);
   static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
                                          InlineCacheState initialization_state);
@@ -55,12 +54,10 @@
       Isolate* isolate, LanguageMode mode,
       InlineCacheState initialization_state);
 
-  static Callable CompareIC(Isolate* isolate, Token::Value op,
-                            Strength strength);
+  static Callable CompareIC(Isolate* isolate, Token::Value op);
   static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
 
-  static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
-                             Strength strength);
+  static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
 
   // Code stubs. Add methods here as needed to reduce dependency on
   // code-stubs.h.
@@ -70,6 +67,7 @@
 
   static Callable ToNumber(Isolate* isolate);
   static Callable ToString(Isolate* isolate);
+  static Callable ToName(Isolate* isolate);
   static Callable ToLength(Isolate* isolate);
   static Callable ToObject(Isolate* isolate);
   static Callable NumberToString(Isolate* isolate);
@@ -91,10 +89,10 @@
   static Callable FastNewContext(Isolate* isolate, int slot_count);
   static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
                                  FunctionKind kind);
-
-  static Callable ArgumentsAccess(Isolate* isolate, bool is_unmapped_arguments,
-                                  bool has_duplicate_parameters);
-  static Callable RestArgumentsAccess(Isolate* isolate);
+  static Callable FastNewObject(Isolate* isolate);
+  static Callable FastNewRestParameter(Isolate* isolate);
+  static Callable FastNewSloppyArguments(Isolate* isolate);
+  static Callable FastNewStrictArguments(Isolate* isolate);
 
   static Callable AllocateHeapNumber(Isolate* isolate);
   static Callable AllocateMutableHeapNumber(Isolate* isolate);
@@ -108,7 +106,8 @@
   static Callable Construct(Isolate* isolate);
   static Callable ConstructFunction(Isolate* isolate);
 
-  static Callable InterpreterPushArgsAndCall(Isolate* isolate);
+  static Callable InterpreterPushArgsAndCall(Isolate* isolate,
+                                             TailCallMode tail_call_mode);
   static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
   static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
 };
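
The CallIC/CallICInOptimizedCode signatures grow a trailing TailCallMode
with a default value, which keeps every existing call site compiling
unchanged. A small illustrative sketch of that pattern (names and the
return value are stand-ins):

#include <iostream>

enum class ConvertReceiverMode { kAny, kNullOrUndefined };
enum class TailCallMode { kDisallow, kAllow };

int CallIC(int argc,
           ConvertReceiverMode mode = ConvertReceiverMode::kAny,
           TailCallMode tail_call_mode = TailCallMode::kDisallow) {
  int key = argc;  // stands in for the stub's minor key
  if (mode == ConvertReceiverMode::kNullOrUndefined) key += 10;
  if (tail_call_mode == TailCallMode::kAllow) key += 100;
  return key;
}

int main() {
  std::cout << CallIC(2) << "\n";  // pre-existing call site, unchanged
  std::cout << CallIC(2, ConvertReceiverMode::kAny,
                      TailCallMode::kAllow) << "\n";  // new call site
  return 0;
}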
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 2fab578..461baaa 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -34,11 +34,12 @@
 
 class CodeStubGraphBuilderBase : public HGraphBuilder {
  public:
-  explicit CodeStubGraphBuilderBase(CompilationInfo* info)
-      : HGraphBuilder(info),
+  explicit CodeStubGraphBuilderBase(CompilationInfo* info, CodeStub* code_stub)
+      : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor()),
         arguments_length_(NULL),
         info_(info),
-        descriptor_(info->code_stub()),
+        code_stub_(code_stub),
+        descriptor_(code_stub),
         context_(NULL) {
     int parameter_count = GetParameterCount();
     parameters_.Reset(new HParameter*[parameter_count]);
@@ -68,7 +69,7 @@
     return arguments_length_;
   }
   CompilationInfo* info() { return info_; }
-  CodeStub* stub() { return info_->code_stub(); }
+  CodeStub* stub() { return code_stub_; }
   HContext* context() { return context_; }
   Isolate* isolate() { return info_->isolate(); }
 
@@ -124,6 +125,7 @@
   base::SmartArrayPointer<HParameter*> parameters_;
   HValue* arguments_length_;
   CompilationInfo* info_;
+  CodeStub* code_stub_;
   CodeStubDescriptor descriptor_;
   HContext* context_;
 };
@@ -178,6 +180,7 @@
 
   context_ = Add<HContext>();
   start_environment->BindContext(context_);
+  start_environment->Bind(param_count, context_);
 
   Add<HSimulate>(BailoutId::StubEntry());
 
@@ -214,8 +217,8 @@
 template <class Stub>
 class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
  public:
-  explicit CodeStubGraphBuilder(CompilationInfo* info)
-      : CodeStubGraphBuilderBase(info) {}
+  explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
+      : CodeStubGraphBuilderBase(info, stub) {}
 
  protected:
   virtual HValue* BuildCodeStub() {
@@ -269,13 +272,8 @@
   masm.GetCode(&desc);
 
   // Copy the generated code into a heap object.
-  Code::Flags flags = Code::ComputeFlags(
-      GetCodeKind(),
-      GetICState(),
-      GetExtraICState(),
-      GetStubType());
   Handle<Code> new_object = factory->NewCode(
-      desc, flags, masm.CodeObject(), NeedsImmovableCode());
+      desc, GetCodeFlags(), masm.CodeObject(), NeedsImmovableCode());
   return new_object;
 }
 
@@ -297,8 +295,15 @@
     timer.Start();
   }
   Zone zone;
-  CompilationInfo info(stub, isolate, &zone);
-  CodeStubGraphBuilder<Stub> builder(&info);
+  CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
+                       stub->GetCodeFlags());
+  // Parameter count is number of stack parameters.
+  int parameter_count = descriptor.GetStackParameterCount();
+  if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
+    parameter_count--;
+  }
+  info.set_parameter_count(parameter_count);
+  CodeStubGraphBuilder<Stub> builder(&info, stub);
   LChunk* chunk = OptimizeGraph(builder.CreateGraph());
   Handle<Code> code = chunk->Codegen();
   if (FLAG_profile_hydrogen_code_stub_compilation) {
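
The stack-parameter-count logic that previously lived in the
CompilationInfo constructor is now computed by this caller. A minimal
sketch of that computation, with simplified stand-in types:

#include <cstdio>

enum StubFunctionMode { JS_FUNCTION_STUB_MODE, NOT_JS_FUNCTION_STUB_MODE };

struct CodeStubDescriptor {
  int stack_parameter_count;
  StubFunctionMode function_mode;
};

// Stubs that are not entered like JS functions have no extra receiver
// slot, so one stack slot is subtracted.
int ComputeParameterCount(const CodeStubDescriptor& descriptor) {
  int parameter_count = descriptor.stack_parameter_count;
  if (descriptor.function_mode == NOT_JS_FUNCTION_STUB_MODE) {
    parameter_count--;
  }
  return parameter_count;
}

int main() {
  CodeStubDescriptor descriptor{3, NOT_JS_FUNCTION_STUB_MODE};
  std::printf("parameter_count = %d\n", ComputeParameterCount(descriptor));
  return 0;
}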
@@ -314,7 +319,7 @@
 HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
   info()->MarkAsSavesCallerDoubles();
   HValue* number = GetParameter(NumberToStringStub::kNumber);
-  return BuildNumberToString(number, Type::Number(zone()));
+  return BuildNumberToString(number, Type::Number());
 }
 
 
@@ -1464,16 +1469,15 @@
       if_leftisstring.If<HIsStringAndBranch>(left);
       if_leftisstring.Then();
       {
-        Push(BuildBinaryOperation(state.op(), left, right, Type::String(zone()),
+        Push(BuildBinaryOperation(state.op(), left, right, Type::String(),
                                   right_type, result_type,
-                                  state.fixed_right_arg(), allocation_mode,
-                                  state.strength()));
+                                  state.fixed_right_arg(), allocation_mode));
       }
       if_leftisstring.Else();
       {
-        Push(BuildBinaryOperation(
-            state.op(), left, right, left_type, right_type, result_type,
-            state.fixed_right_arg(), allocation_mode, state.strength()));
+        Push(BuildBinaryOperation(state.op(), left, right, left_type,
+                                  right_type, result_type,
+                                  state.fixed_right_arg(), allocation_mode));
       }
       if_leftisstring.End();
       result = Pop();
@@ -1483,23 +1487,22 @@
       if_rightisstring.Then();
       {
         Push(BuildBinaryOperation(state.op(), left, right, left_type,
-                                  Type::String(zone()), result_type,
-                                  state.fixed_right_arg(), allocation_mode,
-                                  state.strength()));
+                                  Type::String(), result_type,
+                                  state.fixed_right_arg(), allocation_mode));
       }
       if_rightisstring.Else();
       {
-        Push(BuildBinaryOperation(
-            state.op(), left, right, left_type, right_type, result_type,
-            state.fixed_right_arg(), allocation_mode, state.strength()));
+        Push(BuildBinaryOperation(state.op(), left, right, left_type,
+                                  right_type, result_type,
+                                  state.fixed_right_arg(), allocation_mode));
       }
       if_rightisstring.End();
       result = Pop();
     }
   } else {
-    result = BuildBinaryOperation(
-        state.op(), left, right, left_type, right_type, result_type,
-        state.fixed_right_arg(), allocation_mode, state.strength());
+    result = BuildBinaryOperation(state.op(), left, right, left_type,
+                                  right_type, result_type,
+                                  state.fixed_right_arg(), allocation_mode);
   }
 
   // If we encounter a generic argument, the number conversion is
@@ -1533,7 +1536,7 @@
 
   return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
                               result_type, state.fixed_right_arg(),
-                              allocation_mode, state.strength());
+                              allocation_mode);
 }
 
 
@@ -2154,8 +2157,7 @@
 
   HValue* hash = BuildElementIndexHash(key);
 
-  return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
-                                             casted_stub()->language_mode());
+  return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
 }
 
 
@@ -2186,8 +2188,8 @@
 class CodeStubGraphBuilder<KeyedLoadGenericStub>
     : public CodeStubGraphBuilderBase {
  public:
-  explicit CodeStubGraphBuilder(CompilationInfo* info)
-      : CodeStubGraphBuilderBase(info) {}
+  explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
+      : CodeStubGraphBuilderBase(info, stub) {}
 
  protected:
   virtual HValue* BuildCodeStub();
@@ -2289,8 +2291,7 @@
 
       HValue* hash = BuildElementIndexHash(key);
 
-      Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
-                                               casted_stub()->language_mode()));
+      Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
     }
     kind_if.Else();
 
@@ -2334,8 +2335,8 @@
 
       hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
 
-      HValue* value = BuildUncheckedDictionaryElementLoad(
-          receiver, properties, key, hash, casted_stub()->language_mode());
+      HValue* value =
+          BuildUncheckedDictionaryElementLoad(receiver, properties, key, hash);
       Push(value);
     }
     if_dict_properties.Else();
@@ -2412,10 +2413,7 @@
         // KeyedLookupCache miss; call runtime.
         Add<HPushArguments>(receiver, key);
         Push(Add<HCallRuntime>(
-            Runtime::FunctionForId(is_strong(casted_stub()->language_mode())
-                                       ? Runtime::kKeyedGetPropertyStrong
-                                       : Runtime::kKeyedGetProperty),
-            2));
+            Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
       }
       inline_or_runtime.End();
     }
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 1754288..4e5efcd 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -96,6 +96,12 @@
 }
 
 
+Code::Flags CodeStub::GetCodeFlags() const {
+  return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState(),
+                            GetStubType());
+}
+
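
GetCodeFlags() folds the four getter calls into one helper so that all
clients (NewCode above, the CodeStubAssembler constructor below) receive
identical flags. An illustrative sketch — the field widths and packing
here are made up, not Code::ComputeFlags's real layout:

#include <cstdint>
#include <cstdio>

using Flags = std::uint32_t;

struct Stub {
  Flags kind = 1, ic_state = 2, extra_ic_state = 0, stub_type = 3;
  // One helper computes the packed flags, so code emission and the
  // assembler constructor can no longer disagree.
  Flags GetCodeFlags() const {
    return kind | ic_state << 4 | extra_ic_state << 8 | stub_type << 12;
  }
};

int main() {
  std::printf("flags = 0x%x\n", Stub().GetCodeFlags());
  return 0;
}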
+
 Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
   Handle<Code> ic = GetCode();
   ic = isolate()->factory()->CopyCode(ic);
@@ -270,7 +276,7 @@
 void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
   // Generate the uninitialized versions of the stub.
   for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
-    BinaryOpICStub stub(isolate, static_cast<Token::Value>(op), Strength::WEAK);
+    BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
     stub.GetCode();
   }
 
@@ -453,9 +459,7 @@
     state.Add(NULL_TYPE);
   } else if (object->IsUndefined()) {
     state.Add(UNDEFINED);
-  } else if (object->IsUndetectableObject() ||
-             object->IsOddball() ||
-             !object->IsHeapObject()) {
+  } else if (object->IsUndetectableObject() || object->IsSmi()) {
     state.RemoveAll();
     state.Add(GENERIC);
   } else if (IsMonomorphic()) {
@@ -474,7 +478,7 @@
   Zone zone;
   CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
   compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
-                                        GetCodeKind(), name);
+                                        GetCodeFlags(), name);
   GenerateAssembly(&assembler);
   return assembler.GenerateCode();
 }
@@ -549,18 +553,17 @@
 
 Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
   State state = this->state();
-  if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any(zone);
+  if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any();
 
-  Type* result = Type::None(zone);
+  Type* result = Type::None();
   if (state.Contains(CompareNilICStub::UNDEFINED)) {
-    result = Type::Union(result, Type::Undefined(zone), zone);
+    result = Type::Union(result, Type::Undefined(), zone);
   }
   if (state.Contains(CompareNilICStub::NULL_TYPE)) {
-    result = Type::Union(result, Type::Null(zone), zone);
+    result = Type::Union(result, Type::Null(), zone);
   }
   if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
-    Type* type =
-        map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
+    Type* type = map.is_null() ? Type::Detectable() : Type::Class(map, zone);
     result = Type::Union(result, type, zone);
   }
 
@@ -570,8 +573,7 @@
 
 Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
   Type* output_type = GetType(zone, map);
-  Type* nil_type =
-      nil_value() == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+  Type* nil_type = nil_value() == kNullValue ? Type::Null() : Type::Undefined();
   return Type::Union(output_type, nil_type, zone);
 }
 
@@ -599,9 +601,7 @@
 void KeyedLoadGenericStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   descriptor->Initialize(
-      Runtime::FunctionForId(is_strong(language_mode())
-                                 ? Runtime::kKeyedGetPropertyStrong
-                                 : Runtime::kKeyedGetProperty)->entry);
+      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
 }
 
 
@@ -798,28 +798,8 @@
 
 
 void StoreElementStub::Generate(MacroAssembler* masm) {
-  switch (elements_kind()) {
-    case FAST_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
-    case TYPE##_ELEMENTS:
-
-    TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-      UNREACHABLE();
-      break;
-    case DICTIONARY_ELEMENTS:
-      ElementHandlerCompiler::GenerateStoreSlow(masm);
-      break;
-    case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
-    case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
+  DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
+  ElementHandlerCompiler::GenerateStoreSlow(masm);
 }
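
The pattern above — reducing an exhaustive switch to an assertion once
only one case remains reachable — in a runnable miniature (names are
stand-ins):

#include <cassert>
#include <cstdio>

enum ElementsKind { FAST_ELEMENTS, DICTIONARY_ELEMENTS };

void GenerateStoreSlow() { std::printf("emit slow store handler\n"); }

// All fast and typed-array kinds were unreachable here, so the switch
// collapses to an assertion plus the one live case.
void Generate(ElementsKind kind) {
  assert(kind == DICTIONARY_ELEMENTS);
  GenerateStoreSlow();
}

int main() {
  Generate(DICTIONARY_ELEMENTS);
  return 0;
}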
 
 
@@ -838,52 +818,6 @@
 }
 
 
-void RestParamAccessStub::Generate(MacroAssembler* masm) { GenerateNew(masm); }
-
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
-  switch (type()) {
-    case READ_ELEMENT:
-      GenerateReadElement(masm);
-      break;
-    case NEW_SLOPPY_FAST:
-      GenerateNewSloppyFast(masm);
-      break;
-    case NEW_SLOPPY_SLOW:
-      GenerateNewSloppySlow(masm);
-      break;
-    case NEW_STRICT:
-      GenerateNewStrict(masm);
-      break;
-  }
-}
-
-
-void ArgumentsAccessStub::PrintName(std::ostream& os) const {  // NOLINT
-  os << "ArgumentsAccessStub_";
-  switch (type()) {
-    case READ_ELEMENT:
-      os << "ReadElement";
-      break;
-    case NEW_SLOPPY_FAST:
-      os << "NewSloppyFast";
-      break;
-    case NEW_SLOPPY_SLOW:
-      os << "NewSloppySlow";
-      break;
-    case NEW_STRICT:
-      os << "NewStrict";
-      break;
-  }
-  return;
-}
-
-
-void RestParamAccessStub::PrintName(std::ostream& os) const {  // NOLINT
-  os << "RestParamAccessStub_";
-}
-
-
 void ArrayConstructorStub::PrintName(std::ostream& os) const {  // NOLINT
   os << "ArrayConstructorStub";
   switch (argument_count()) {
@@ -964,9 +898,9 @@
     Add(SPEC_OBJECT);
     return !object->IsUndetectableObject();
   } else if (object->IsString()) {
+    DCHECK(!object->IsUndetectableObject());
     Add(STRING);
-    return !object->IsUndetectableObject() &&
-        String::cast(*object)->length() != 0;
+    return String::cast(*object)->length() != 0;
   } else if (object->IsSymbol()) {
     Add(SYMBOL);
     return true;
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 21e2135..f370ce6 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -21,7 +21,6 @@
 // List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)     \
   /* PlatformCodeStubs */                   \
-  V(ArgumentsAccess)                        \
   V(ArrayConstructor)                       \
   V(BinaryOpICWithAllocationSite)           \
   V(CallApiFunction)                        \
@@ -44,7 +43,6 @@
   V(MathPow)                                \
   V(ProfileEntryHook)                       \
   V(RecordWrite)                            \
-  V(RestParamAccess)                        \
   V(RegExpExec)                             \
   V(StoreBufferOverflow)                    \
   V(StoreElement)                           \
@@ -54,6 +52,7 @@
   V(ToNumber)                               \
   V(ToLength)                               \
   V(ToString)                               \
+  V(ToName)                                 \
   V(ToObject)                               \
   V(VectorStoreICTrampoline)                \
   V(VectorKeyedStoreICTrampoline)           \
@@ -77,6 +76,10 @@
   V(FastCloneShallowObject)                 \
   V(FastNewClosure)                         \
   V(FastNewContext)                         \
+  V(FastNewObject)                          \
+  V(FastNewRestParameter)                   \
+  V(FastNewSloppyArguments)                 \
+  V(FastNewStrictArguments)                 \
   V(GrowArrayElements)                      \
   V(InternalArrayNArgumentsConstructor)     \
   V(InternalArrayNoArgumentConstructor)     \
@@ -240,6 +243,8 @@
   virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
   virtual Code::StubType GetStubType() const { return Code::NORMAL; }
 
+  Code::Flags GetCodeFlags() const;
+
   friend std::ostream& operator<<(std::ostream& os, const CodeStub& s) {
     s.PrintName(os);
     return os;
@@ -323,8 +328,10 @@
 
 
 #define DEFINE_CODE_STUB(NAME, SUPER)                      \
- protected:                                                \
+ public:                                                   \
   inline Major MajorKey() const override { return NAME; }; \
+                                                           \
+ protected:                                                \
   DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
 
 
@@ -720,6 +727,55 @@
 };
 
 
+class FastNewObjectStub final : public PlatformCodeStub {
+ public:
+  explicit FastNewObjectStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewObject);
+  DEFINE_PLATFORM_CODE_STUB(FastNewObject, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): It should soon be possible to write this stub in
+// TurboFan using the CodeStubAssembler, in a way that is as efficient
+// and easy as the current handwritten version, which is partly a copy
+// of the strict arguments object materialization code.
+class FastNewRestParameterStub final : public PlatformCodeStub {
+ public:
+  explicit FastNewRestParameterStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
+  DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): It should soon be possible to write this stub in
+// TurboFan using the CodeStubAssembler, in a way that is as efficient
+// and easy as the current handwritten version.
+class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
+ public:
+  explicit FastNewSloppyArgumentsStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
+  DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): It should soon be possible to write this stub in
+// TurboFan using the CodeStubAssembler, in a way that is as efficient
+// and easy as the current handwritten version.
+class FastNewStrictArgumentsStub final : public PlatformCodeStub {
+ public:
+  explicit FastNewStrictArgumentsStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
+  DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
+};
+
+
 class FastCloneRegExpStub final : public HydrogenCodeStub {
  public:
   explicit FastCloneRegExpStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -920,6 +976,7 @@
  protected:
   int arg_count() const { return state().argc(); }
   ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
+  TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
 
   CallICState state() const {
     return CallICState(static_cast<ExtraICState>(minor_key_));
@@ -1383,11 +1440,13 @@
 
 class CallApiAccessorStub : public PlatformCodeStub {
  public:
-  CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined)
+  CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined,
+                      bool is_lazy)
       : PlatformCodeStub(isolate) {
     minor_key_ = IsStoreBits::encode(is_store) |
                  CallDataUndefinedBits::encode(call_data_undefined) |
-                 ArgumentBits::encode(is_store ? 1 : 0);
+                 ArgumentBits::encode(is_store ? 1 : 0) |
+                 IsLazyAccessorBits::encode(is_lazy);
   }
 
  protected:
@@ -1402,6 +1461,7 @@
 
  private:
   bool is_store() const { return IsStoreBits::decode(minor_key_); }
+  bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
   bool call_data_undefined() const {
     return CallDataUndefinedBits::decode(minor_key_);
   }
@@ -1410,6 +1470,7 @@
   class IsStoreBits: public BitField<bool, 0, 1> {};
   class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
   class ArgumentBits : public BitField<int, 2, kArgBits> {};
+  class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
   DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
@@ -1445,9 +1506,9 @@
 
 class BinaryOpICStub : public HydrogenCodeStub {
  public:
-  BinaryOpICStub(Isolate* isolate, Token::Value op, Strength strength)
+  BinaryOpICStub(Isolate* isolate, Token::Value op)
       : HydrogenCodeStub(isolate, UNINITIALIZED) {
-    BinaryOpICState state(isolate, op, strength);
+    BinaryOpICState state(isolate, op);
     set_sub_minor_key(state.GetExtraICState());
   }
 
@@ -1528,9 +1589,8 @@
 
 class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
  public:
-  BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op,
-                                 Strength strength)
-      : BinaryOpICStub(isolate, op, strength) {}
+  BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op)
+      : BinaryOpICStub(isolate, op) {}
 
   BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
       : BinaryOpICStub(isolate, state) {}
@@ -1581,13 +1641,11 @@
 
 class CompareICStub : public PlatformCodeStub {
  public:
-  CompareICStub(Isolate* isolate, Token::Value op, Strength strength,
-                CompareICState::State left, CompareICState::State right,
-                CompareICState::State state)
+  CompareICStub(Isolate* isolate, Token::Value op, CompareICState::State left,
+                CompareICState::State right, CompareICState::State state)
       : PlatformCodeStub(isolate) {
     DCHECK(Token::IsCompareOp(op));
     minor_key_ = OpBits::encode(op - Token::EQ) |
-                 StrengthBits::encode(is_strong(strength)) |
                  LeftStateBits::encode(left) | RightStateBits::encode(right) |
                  StateBits::encode(state);
   }
@@ -1600,10 +1658,6 @@
     return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
   }
 
-  Strength strength() const {
-    return StrengthBits::decode(minor_key_) ? Strength::STRONG : Strength::WEAK;
-  }
-
   CompareICState::State left() const {
     return LeftStateBits::decode(minor_key_);
   }
@@ -1636,10 +1690,9 @@
   }
 
   class OpBits : public BitField<int, 0, 3> {};
-  class StrengthBits : public BitField<bool, 3, 1> {};
-  class LeftStateBits : public BitField<CompareICState::State, 4, 4> {};
-  class RightStateBits : public BitField<CompareICState::State, 8, 4> {};
-  class StateBits : public BitField<CompareICState::State, 12, 4> {};
+  class LeftStateBits : public BitField<CompareICState::State, 3, 4> {};
+  class RightStateBits : public BitField<CompareICState::State, 7, 4> {};
+  class StateBits : public BitField<CompareICState::State, 11, 4> {};
 
   Handle<Map> known_map_;
 
@@ -1746,10 +1799,8 @@
       : PlatformCodeStub(isolate) {
     minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
                  ArgvMode::encode(argv_mode == kArgvInRegister);
-    DCHECK(result_size == 1 || result_size == 2);
-#if _WIN64 || V8_TARGET_ARCH_PPC
+    DCHECK(result_size == 1 || result_size == 2 || result_size == 3);
     minor_key_ = ResultSizeBits::update(minor_key_, result_size);
-#endif  // _WIN64
   }
 
   // The version of this stub that doesn't save doubles is generated ahead of
@@ -1761,9 +1812,7 @@
  private:
   bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
   bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
-#if _WIN64 || V8_TARGET_ARCH_PPC
   int result_size() const { return ResultSizeBits::decode(minor_key_); }
-#endif  // _WIN64
 
   bool NeedsImmovableCode() override;
 
@@ -1805,67 +1854,6 @@
 };
 
 
-class ArgumentsAccessStub: public PlatformCodeStub {
- public:
-  enum Type {
-    READ_ELEMENT,
-    NEW_SLOPPY_FAST,
-    NEW_SLOPPY_SLOW,
-    NEW_STRICT
-  };
-
-  ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
-    minor_key_ = TypeBits::encode(type);
-  }
-
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    if (type() == READ_ELEMENT) {
-      return ArgumentsAccessReadDescriptor(isolate());
-    } else {
-      return ArgumentsAccessNewDescriptor(isolate());
-    }
-  }
-
-  static Type ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
-    if (is_unmapped) {
-      return Type::NEW_STRICT;
-    } else if (has_duplicate_parameters) {
-      return Type::NEW_SLOPPY_SLOW;
-    } else {
-      return Type::NEW_SLOPPY_FAST;
-    }
-  }
-
- private:
-  Type type() const { return TypeBits::decode(minor_key_); }
-
-  void GenerateReadElement(MacroAssembler* masm);
-  void GenerateNewStrict(MacroAssembler* masm);
-  void GenerateNewSloppyFast(MacroAssembler* masm);
-  void GenerateNewSloppySlow(MacroAssembler* masm);
-
-  void PrintName(std::ostream& os) const override;  // NOLINT
-
-  class TypeBits : public BitField<Type, 0, 2> {};
-
-  DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
-};
-
-
-class RestParamAccessStub : public PlatformCodeStub {
- public:
-  explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  void GenerateNew(MacroAssembler* masm);
-
-  void PrintName(std::ostream& os) const override;  // NOLINT
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(RestParamAccess);
-  DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
-};
-
-
 class RegExpExecStub: public PlatformCodeStub {
  public:
   explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -2096,10 +2084,6 @@
     return LoadWithVectorDescriptor(isolate());
   }
 
-  LanguageMode language_mode() const {
-    return LoadICState::GetLanguageMode(MinorKey());
-  }
-
   DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
 };
 
@@ -2114,10 +2098,6 @@
   Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
   InlineCacheState GetICState() const override { return GENERIC; }
 
-  LanguageMode language_mode() const {
-    return LoadICState::GetLanguageMode(MinorKey());
-  }
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
 
   DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
@@ -2724,6 +2704,9 @@
   StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
                    KeyedAccessStoreMode mode)
       : PlatformCodeStub(isolate) {
+    // TODO(jkummerow): Rename this stub to StoreSlowElementStub and
+    // drop the elements_kind parameter.
+    DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
     minor_key_ = ElementsKindBits::encode(elements_kind) |
                  CommonStoreModeBits::encode(mode);
   }
@@ -2950,6 +2933,15 @@
 };
 
 
+class ToNameStub final : public PlatformCodeStub {
+ public:
+  explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToName);
+  DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
+};
+
+
 class ToObjectStub final : public HydrogenCodeStub {
  public:
   explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
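
For the CompareICStub change earlier in this file: removing the 1-bit
StrengthBits field lets the remaining minor_key_ fields shift down from
bits 4/8/12 to 3/7/11. A compilable sketch of that repacking, using a
BitField simplified from V8's:

#include <cassert>
#include <cstdint>

// Simplified from V8's BitField: encode/decode a value at a bit range.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr std::uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static std::uint32_t encode(T value) {
    return static_cast<std::uint32_t>(value) << kShift;
  }
  static T decode(std::uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

// After StrengthBits (1 bit at position 3) is removed, the remaining
// fields pack one bit lower.
using OpBits = BitField<int, 0, 3>;
using LeftStateBits = BitField<int, 3, 4>;
using RightStateBits = BitField<int, 7, 4>;
using StateBits = BitField<int, 11, 4>;

int main() {
  std::uint32_t minor_key = OpBits::encode(5) | LeftStateBits::encode(9) |
                            RightStateBits::encode(3) | StateBits::encode(7);
  assert(OpBits::decode(minor_key) == 5);
  assert(LeftStateBits::decode(minor_key) == 9);
  assert(RightStateBits::decode(minor_key) == 3);
  assert(StateBits::decode(minor_key) == 7);
  return 0;
}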
diff --git a/src/codegen.cc b/src/codegen.cc
index a57cbb3..692fa64 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -124,18 +124,9 @@
                                              CompilationInfo* info) {
   Isolate* isolate = info->isolate();
 
-  Code::Flags flags;
-  if (info->IsStub() && info->code_stub()) {
-    DCHECK_EQ(info->output_code_kind(), info->code_stub()->GetCodeKind());
-    flags = Code::ComputeFlags(
-        info->output_code_kind(), info->code_stub()->GetICState(),
-        info->code_stub()->GetExtraICState(), info->code_stub()->GetStubType());
-  } else {
-    flags = Code::ComputeFlags(info->output_code_kind());
-  }
-
   // Allocate and install the code.
   CodeDesc desc;
+  Code::Flags flags = info->code_flags();
   bool is_crankshafted =
       Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
       info->IsStub();
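
The flags are now fixed when the CompilationInfo is constructed and
merely read back here. A tiny stand-in sketch of that inversion (not
V8's real CompilationInfo):

#include <cstdio>

class CompilationInfo {
 public:
  explicit CompilationInfo(unsigned code_flags) : code_flags_(code_flags) {}
  unsigned code_flags() const { return code_flags_; }

 private:
  unsigned code_flags_;  // decided once, at construction
};

unsigned MakeCodeEpilogue(const CompilationInfo& info) {
  return info.code_flags();  // no IsStub()/code_stub() branching needed
}

int main() {
  CompilationInfo info(0x24u);
  std::printf("flags = 0x%x\n", MakeCodeEpilogue(info));
  return 0;
}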
diff --git a/src/compiler.cc b/src/compiler.cc
index 307b3b0..c47e1b7 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -37,16 +37,6 @@
 namespace v8 {
 namespace internal {
 
-std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
-  if (p.IsUnknown()) {
-    return os << "<?>";
-  } else if (FLAG_hydrogen_track_positions) {
-    return os << "<" << p.inlining_id() << ":" << p.position() << ">";
-  } else {
-    return os << "<0:" << p.raw() << ">";
-  }
-}
-
 
 #define PARSE_INFO_GETTER(type, name)  \
   type CompilationInfo::name() const { \
@@ -120,8 +110,8 @@
 
 
 CompilationInfo::CompilationInfo(ParseInfo* parse_info)
-    : CompilationInfo(parse_info, nullptr, nullptr, BASE, parse_info->isolate(),
-                      parse_info->zone()) {
+    : CompilationInfo(parse_info, nullptr, Code::ComputeFlags(Code::FUNCTION),
+                      BASE, parse_info->isolate(), parse_info->zone()) {
   // Compiling for the snapshot typically results in different code than
   // compiling later on. This means that code recompiled with deoptimization
   // support won't be "equivalent" (as defined by SharedFunctionInfo::
@@ -148,23 +138,17 @@
 }
 
 
-CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
-    : CompilationInfo(nullptr, stub, CodeStub::MajorName(stub->MajorKey()),
-                      STUB, isolate, zone) {}
-
 CompilationInfo::CompilationInfo(const char* debug_name, Isolate* isolate,
-                                 Zone* zone)
-    : CompilationInfo(nullptr, nullptr, debug_name, STUB, isolate, zone) {
-  set_output_code_kind(Code::STUB);
-}
+                                 Zone* zone, Code::Flags code_flags)
+    : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
 
-CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
-                                 const char* debug_name, Mode mode,
+CompilationInfo::CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+                                 Code::Flags code_flags, Mode mode,
                                  Isolate* isolate, Zone* zone)
     : parse_info_(parse_info),
       isolate_(isolate),
       flags_(0),
-      code_stub_(code_stub),
+      code_flags_(code_flags),
       mode_(mode),
       osr_ast_id_(BailoutId::None()),
       zone_(zone),
@@ -178,19 +162,7 @@
       parameter_count_(0),
       optimization_id_(-1),
       osr_expr_stack_height_(0),
-      debug_name_(debug_name) {
-  // Parameter count is number of stack parameters.
-  if (code_stub_ != NULL) {
-    CodeStubDescriptor descriptor(code_stub_);
-    parameter_count_ = descriptor.GetStackParameterCount();
-    if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
-      parameter_count_--;
-    }
-    set_output_code_kind(code_stub->GetCodeKind());
-  } else {
-    set_output_code_kind(Code::FUNCTION);
-  }
-}
+      debug_name_(debug_name) {}
 
 
 CompilationInfo::~CompilationInfo() {
@@ -307,10 +279,13 @@
 
 
 base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
-  if (parse_info()) {
+  if (parse_info() && parse_info()->literal()) {
     AllowHandleDereference allow_deref;
     return parse_info()->literal()->debug_name()->ToCString();
   }
+  if (parse_info() && !parse_info()->shared_info().is_null()) {
+    return parse_info()->shared_info()->DebugName()->ToCString();
+  }
   const char* str = debug_name_ ? debug_name_ : "unknown";
   size_t len = strlen(str) + 1;
   base::SmartArrayPointer<char> name(new char[len]);
@@ -446,10 +421,14 @@
     if (info()->shared_info()->asm_function()) {
       if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
       info()->MarkAsFunctionContextSpecializing();
-    } else if (info()->has_global_object() &&
-               FLAG_native_context_specialization) {
-      info()->MarkAsNativeContextSpecializing();
-      info()->MarkAsTypingEnabled();
+    } else {
+      if (!FLAG_always_opt) {
+        info()->MarkAsBailoutOnUninitialized();
+      }
+      if (FLAG_native_context_specialization) {
+        info()->MarkAsNativeContextSpecializing();
+        info()->MarkAsTypingEnabled();
+      }
     }
     if (!info()->shared_info()->asm_function() ||
         FLAG_turbo_asm_deoptimization) {
@@ -755,7 +734,6 @@
   }
 }
 
-
 static bool CompileUnoptimizedCode(CompilationInfo* info) {
   DCHECK(AllowCompilation::IsAllowed(info->isolate()));
   if (!Compiler::Analyze(info->parse_info()) ||
@@ -768,32 +746,12 @@
 }
 
 
-// TODO(rmcilroy): Remove this temporary work-around when ignition supports
-// catch and eval.
-static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
-  if (scope->is_eval_scope() || scope->is_catch_scope() ||
-      scope->calls_eval()) {
-    return true;
-  }
-  for (auto inner_scope : *scope->inner_scopes()) {
-    if (IgnitionShouldFallbackToFullCodeGen(inner_scope)) return true;
-  }
-  return false;
-}
-
-
 static bool UseIgnition(CompilationInfo* info) {
   // Cannot use Ignition when the {function_data} is already used.
   if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
     return false;
   }
 
-  // Checks whether the scope chain is supported.
-  if (FLAG_ignition_fallback_on_eval_and_catch &&
-      IgnitionShouldFallbackToFullCodeGen(info->scope())) {
-    return false;
-  }
-
   // Checks whether top level functions should be passed by the filter.
   if (info->closure().is_null()) {
     Vector<const char> filter = CStrVector(FLAG_ignition_filter);
@@ -804,13 +762,39 @@
   return info->closure()->PassesFilter(FLAG_ignition_filter);
 }
 
+static int CodeAndMetadataSize(CompilationInfo* info) {
+  int size = 0;
+  if (info->has_bytecode_array()) {
+    Handle<BytecodeArray> bytecode_array = info->bytecode_array();
+    size += bytecode_array->BytecodeArraySize();
+    size += bytecode_array->constant_pool()->Size();
+    size += bytecode_array->handler_table()->Size();
+    size += bytecode_array->source_position_table()->Size();
+  } else {
+    Handle<Code> code = info->code();
+    size += code->CodeSize();
+    size += code->relocation_info()->Size();
+    size += code->deoptimization_data()->Size();
+    size += code->handler_table()->Size();
+  }
+  return size;
+}
+
 
 static bool GenerateBaselineCode(CompilationInfo* info) {
+  bool success;
   if (FLAG_ignition && UseIgnition(info)) {
-    return interpreter::Interpreter::MakeBytecode(info);
+    success = interpreter::Interpreter::MakeBytecode(info);
   } else {
-    return FullCodeGenerator::MakeCode(info);
+    success = FullCodeGenerator::MakeCode(info);
   }
+  if (success) {
+    Isolate* isolate = info->isolate();
+    Counters* counters = isolate->counters();
+    counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
+    counters->total_baseline_compile_count()->Increment(1);
+  }
+  return success;
 }
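
A minimal sketch of the size accounting feeding the new baseline
counters, assuming illustrative struct layouts rather than V8's real
BytecodeArray/Code objects:

#include <cstdio>

struct BytecodeArray {
  int bytecode_size, constant_pool_size, handler_table_size,
      source_position_table_size;
};

struct Code {
  int code_size, relocation_info_size, deopt_data_size, handler_table_size;
};

// Sum the main artifact plus its side tables, picking the breakdown
// that matches what the baseline compiler produced.
int CodeAndMetadataSize(const BytecodeArray* bytecode, const Code* code) {
  if (bytecode != nullptr) {
    return bytecode->bytecode_size + bytecode->constant_pool_size +
           bytecode->handler_table_size +
           bytecode->source_position_table_size;
  }
  return code->code_size + code->relocation_info_size +
         code->deopt_data_size + code->handler_table_size;
}

int main() {
  BytecodeArray bytecode{120, 32, 16, 24};
  std::printf("baseline size = %d\n", CodeAndMetadataSize(&bytecode, nullptr));
  return 0;
}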
 
 
@@ -947,10 +931,13 @@
 static bool GetOptimizedCodeNow(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   CanonicalHandleScope canonical(isolate);
+  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+  TRACE_EVENT0("v8", "V8.OptimizeCode");
 
   if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
 
   TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
+  TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   OptimizedCompileJob job(info);
   if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
@@ -976,6 +963,8 @@
 static bool GetOptimizedCodeLater(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   CanonicalHandleScope canonical(isolate);
+  TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+  TRACE_EVENT0("v8", "V8.OptimizeCode");
 
   if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
     if (FLAG_trace_concurrent_recompilation) {
@@ -994,6 +983,7 @@
   info->parse_info()->ReopenHandlesInNewHandleScope();
 
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+  TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
   OptimizedCompileJob::Status status = job->CreateGraph();
@@ -1033,6 +1023,8 @@
   Isolate* isolate = function->GetIsolate();
   DCHECK(!isolate->has_pending_exception());
   DCHECK(!function->is_compiled());
+  TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
+  TRACE_EVENT0("v8", "V8.CompileCode");
   AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
   // If the debugger is active, do not compile with turbofan unless we can
   // deopt from turbofan code.
@@ -1044,7 +1036,7 @@
     VMState<COMPILER> state(isolate);
     PostponeInterruptsScope postpone(isolate);
 
-    info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
+    info.SetOptimizing();
 
     if (GetOptimizedCodeNow(&info)) {
       DCHECK(function->shared()->is_compiled());
@@ -1066,9 +1058,8 @@
 
   if (FLAG_always_opt) {
     Handle<Code> opt_code;
-    if (Compiler::GetOptimizedCode(
-            function, result,
-            Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) {
+    if (Compiler::GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+            .ToHandle(&opt_code)) {
       result = opt_code;
     }
   }
@@ -1241,6 +1232,8 @@
 
 static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
+  TimerEventScope<TimerEventCompileCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.CompileCode");
   PostponeInterruptsScope postpone(isolate);
   DCHECK(!isolate->native_context().is_null());
   ParseInfo* parse_info = info->parse_info();
@@ -1300,6 +1293,7 @@
           ? info->isolate()->counters()->compile_eval()
           : info->isolate()->counters()->compile();
     HistogramTimerScope timer(rate);
+    TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
 
     // Compile the code.
     if (!CompileBaselineCode(info)) {
@@ -1470,6 +1464,7 @@
         !isolate->debug()->is_loaded()) {
       // Then check cached code provided by embedder.
       HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+      TRACE_EVENT0("v8", "V8.CompileDeserialize");
       Handle<SharedFunctionInfo> result;
       if (CodeSerializer::Deserialize(isolate, *cached_data, source)
               .ToHandle(&result)) {
@@ -1495,6 +1490,9 @@
     if (natives == NATIVES_CODE) {
       script->set_type(Script::TYPE_NATIVE);
       script->set_hide_source(true);
+    } else if (natives == EXTENSION_CODE) {
+      script->set_type(Script::TYPE_EXTENSION);
+      script->set_hide_source(true);
     }
     if (!script_name.is_null()) {
       script->set_name(*script_name);
@@ -1535,6 +1533,7 @@
           compile_options == ScriptCompiler::kProduceCodeCache) {
         HistogramTimerScope histogram_timer(
             isolate->counters()->compile_serialize());
+        TRACE_EVENT0("v8", "V8.CompileSerialize");
         *cached_data = CodeSerializer::Serialize(isolate, result, source);
         if (FLAG_profile_deserialization) {
           PrintF("[Compiling and serializing took %0.3f ms]\n",
@@ -1635,6 +1634,8 @@
   bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
 
   // Generate code
+  TimerEventScope<TimerEventCompileCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.CompileCode");
   Handle<ScopeInfo> scope_info;
   if (lazy) {
     Handle<Code> code = isolate->builtins()->CompileLazy();
@@ -1700,9 +1701,39 @@
   return existing;
 }
 
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
+    v8::Extension* extension, Handle<String> name) {
+  Isolate* isolate = name->GetIsolate();
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+  // Compute the function template for the native function.
+  v8::Local<v8::FunctionTemplate> fun_template =
+      extension->GetNativeFunctionTemplate(v8_isolate,
+                                           v8::Utils::ToLocal(name));
+  DCHECK(!fun_template.IsEmpty());
+
+  // Instantiate the function and create a shared function info from it.
+  Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
+      *fun_template->GetFunction(v8_isolate->GetCurrentContext())
+           .ToLocalChecked()));
+  const int literals = fun->NumberOfLiterals();
+  Handle<Code> code = Handle<Code>(fun->shared()->code());
+  Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
+  Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+      name, literals, FunctionKind::kNormalFunction, code,
+      Handle<ScopeInfo>(fun->shared()->scope_info()),
+      Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
+  shared->set_construct_stub(*construct_stub);
+
+  // Copy the function data to the shared function info.
+  shared->set_function_data(fun->shared()->function_data());
+  int parameters = fun->shared()->internal_formal_parameter_count();
+  shared->set_internal_formal_parameter_count(parameters);
+
+  return shared;
+}
 
 MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
-                                             Handle<Code> current_code,
                                              ConcurrencyMode mode,
                                              BailoutId osr_ast_id,
                                              JavaScriptFrame* osr_frame) {
@@ -1726,6 +1757,7 @@
 
   DCHECK(AllowCompilation::IsAllowed(isolate));
 
+  Handle<Code> current_code(shared->code());
   if (!shared->is_compiled() ||
       shared->scope_info() == ScopeInfo::Empty(isolate)) {
     // The function was never compiled. Compile it unoptimized first.
@@ -1758,7 +1790,7 @@
   DCHECK(!isolate->has_pending_exception());
   PostponeInterruptsScope postpone(isolate);
 
-  info->SetOptimizing(osr_ast_id, current_code);
+  info->SetOptimizingForOsr(osr_ast_id, current_code);
 
   if (mode == CONCURRENT) {
     if (GetOptimizedCodeLater(info.get())) {
@@ -1774,8 +1806,8 @@
   return MaybeHandle<Code>();
 }
 
-
-Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
+MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
+    OptimizedCompileJob* job) {
   // Take ownership of compilation info.  Deleting compilation info
   // also tears down the zone and the recompile job.
   base::SmartPointer<CompilationInfo> info(job->info());
@@ -1783,6 +1815,7 @@
 
   VMState<COMPILER> state(isolate);
   TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+  TRACE_EVENT0("v8", "V8.RecompileSynchronous");
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
   shared->code()->set_profiler_ticks(0);
@@ -1820,7 +1853,7 @@
     info->closure()->ShortPrint();
     PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
   }
-  return Handle<Code>::null();
+  return MaybeHandle<Code>();
 }
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 9b43939..a56fa13 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -10,79 +10,18 @@
 #include "src/bailout-reason.h"
 #include "src/compilation-dependencies.h"
 #include "src/signature.h"
+#include "src/source-position.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 
-class AstValueFactory;
-class HydrogenCodeStub;
+// Forward declarations.
 class JavaScriptFrame;
 class ParseInfo;
 class ScriptData;
 
 
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class SourcePosition {
- public:
-  static SourcePosition Unknown() {
-    return SourcePosition::FromRaw(kNoPosition);
-  }
-
-  bool IsUnknown() const { return value_ == kNoPosition; }
-
-  uint32_t position() const { return PositionField::decode(value_); }
-  void set_position(uint32_t position) {
-    if (FLAG_hydrogen_track_positions) {
-      value_ = static_cast<uint32_t>(PositionField::update(value_, position));
-    } else {
-      value_ = position;
-    }
-  }
-
-  uint32_t inlining_id() const { return InliningIdField::decode(value_); }
-  void set_inlining_id(uint32_t inlining_id) {
-    if (FLAG_hydrogen_track_positions) {
-      value_ =
-          static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
-    }
-  }
-
-  uint32_t raw() const { return value_; }
-
- private:
-  static const uint32_t kNoPosition =
-      static_cast<uint32_t>(RelocInfo::kNoPosition);
-  typedef BitField<uint32_t, 0, 9> InliningIdField;
-
-  // Offset from the start of the inlined function.
-  typedef BitField<uint32_t, 9, 23> PositionField;
-
-  friend class HPositionInfo;
-  friend class Deoptimizer;
-
-  static SourcePosition FromRaw(uint32_t raw_position) {
-    SourcePosition position;
-    position.value_ = raw_position;
-    return position;
-  }
-
-  // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
-  // and PositionField.
-  // Otherwise contains absolute offset from the script start.
-  uint32_t value_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
-
-
 struct InlinedFunctionInfo {
   InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
                       int script_id, int start_position)
@@ -125,11 +64,12 @@
     kDeoptimizationEnabled = 1 << 16,
     kSourcePositionsEnabled = 1 << 17,
     kFirstCompile = 1 << 18,
+    kBailoutOnUninitialized = 1 << 19,
   };
 
   explicit CompilationInfo(ParseInfo* parse_info);
-  CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
-  CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone);
+  CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone,
+                  Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
   virtual ~CompilationInfo();
 
   ParseInfo* parse_info() const { return parse_info_; }
@@ -159,7 +99,7 @@
   Zone* zone() { return zone_; }
   bool is_osr() const { return !osr_ast_id_.IsNone(); }
   Handle<Code> code() const { return code_; }
-  CodeStub* code_stub() const { return code_stub_; }
+  Code::Flags code_flags() const { return code_flags_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
   Handle<Code> unoptimized_code() const { return unoptimized_code_; }
   int opt_count() const { return opt_count_; }
@@ -268,12 +208,18 @@
 
   bool is_first_compile() const { return GetFlag(kFirstCompile); }
 
+  void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
+
+  bool is_bailout_on_uninitialized() const {
+    return GetFlag(kBailoutOnUninitialized);
+  }
+
   bool GeneratePreagedPrologue() const {
     // Generate a pre-aged prologue if we are optimizing for size, which
     // will make code flushing more aggressive. Only apply to Code::FUNCTION,
     // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
     return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
-           !is_debug() && output_code_kind_ == Code::FUNCTION;
+           !is_debug() && output_code_kind() == Code::FUNCTION;
   }
 
   void EnsureFeedbackVector();
@@ -308,13 +254,17 @@
   // Accessors for the different compilation modes.
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsStub() const { return mode_ == STUB; }
-  void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+  void SetOptimizing() {
     DCHECK(has_shared_info());
     SetMode(OPTIMIZE);
+    optimization_id_ = isolate()->NextOptimizationId();
+    code_flags_ =
+        Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
+  }
+  void SetOptimizingForOsr(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+    SetOptimizing();
     osr_ast_id_ = osr_ast_id;
     unoptimized_code_ = unoptimized;
-    optimization_id_ = isolate()->NextOptimizationId();
-    set_output_code_kind(Code::OPTIMIZED_FUNCTION);
   }
 
   // Deoptimization support.
@@ -423,9 +373,9 @@
 
   base::SmartArrayPointer<char> GetDebugName() const;
 
-  Code::Kind output_code_kind() const { return output_code_kind_; }
-
-  void set_output_code_kind(Code::Kind kind) { output_code_kind_ = kind; }
+  Code::Kind output_code_kind() const {
+    return Code::ExtractKindFromFlags(code_flags_);
+  }
 
  protected:
   ParseInfo* parse_info_;
@@ -446,8 +396,8 @@
     STUB
   };
 
-  CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
-                  const char* debug_name, Mode mode, Isolate* isolate,
+  CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+                  Code::Flags code_flags, Mode mode, Isolate* isolate,
                   Zone* zone);
 
   Isolate* isolate_;
@@ -466,10 +416,8 @@
 
   unsigned flags_;
 
-  Code::Kind output_code_kind_;
+  Code::Flags code_flags_;
 
-  // For compiled stubs, the stub object
-  CodeStub* code_stub_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -683,19 +631,24 @@
   static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
       FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
 
+  // Create a shared function info object for a native function literal.
+  static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
+      v8::Extension* extension, Handle<String> name);
+
   enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
 
   // Generate and return optimized code or start a concurrent optimization job.
   // In the latter case, return the InOptimizationQueue builtin.  On failure,
   // return the empty handle.
   MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
-      Handle<JSFunction> function, Handle<Code> current_code,
-      ConcurrencyMode mode, BailoutId osr_ast_id = BailoutId::None(),
+      Handle<JSFunction> function, ConcurrencyMode mode,
+      BailoutId osr_ast_id = BailoutId::None(),
       JavaScriptFrame* osr_frame = nullptr);
 
   // Generate and return code from previously queued optimization job.
   // On failure, return the empty handle.
-  static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
+  MUST_USE_RESULT static MaybeHandle<Code> GetConcurrentlyOptimizedCode(
+      OptimizedCompileJob* job);
 };
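
The SourcePosition class removed above now lives in
src/source-position.h; when hydrogen position tracking is on, its
32-bit value packs a 9-bit inlining id in the low bits and a 23-bit
offset above it. A standalone sketch of that packing:

#include <cassert>
#include <cstdint>

// Low 9 bits: inlining id; upper 23 bits: offset within the inlined
// function, matching the InliningIdField/PositionField layout.
constexpr std::uint32_t kInliningIdBits = 9;

std::uint32_t Encode(std::uint32_t inlining_id, std::uint32_t position) {
  assert(inlining_id < (1u << kInliningIdBits));
  assert(position < (1u << 23));
  return inlining_id | position << kInliningIdBits;
}

std::uint32_t InliningId(std::uint32_t value) {
  return value & ((1u << kInliningIdBits) - 1);
}

std::uint32_t Position(std::uint32_t value) {
  return value >> kInliningIdBits;
}

int main() {
  std::uint32_t value = Encode(7, 4242);
  assert(InliningId(value) == 7);
  assert(Position(value) == 4242);
  return 0;
}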
 
 
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index ebd2789..722bbf0 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -6,9 +6,9 @@
 
 #include "src/contexts.h"
 #include "src/frames.h"
+#include "src/handles-inl.h"
 #include "src/heap/heap.h"
 #include "src/type-cache.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -268,20 +268,16 @@
 
 // static
 FieldAccess AccessBuilder::ForArgumentsLength() {
-  int offset =
-      JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
+                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForArgumentsCallee() {
-  int offset =
-      JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
-  FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
-                        MachineType::AnyTagged()};
+  FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
+                        Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
   return access;
 }
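
The accessors above swap computed offsets for named layout constants. A
compile-time sketch showing the two styles agree (sizes and indices here
are illustrative, not V8's real object layout):

#include <cstdio>

constexpr int kPointerSize = 8;

struct JSObject {
  static constexpr int kHeaderSize = 2 * kPointerSize;
};

// Before: every use site recomputed "header + index * pointer size".
constexpr int kArgumentsLengthIndex = 0;
constexpr int kOldLengthOffset =
    JSObject::kHeaderSize + kArgumentsLengthIndex * kPointerSize;

// After: a dedicated layout class names the offset once.
struct JSArgumentsObject : JSObject {
  static constexpr int kLengthOffset = kHeaderSize;
};

static_assert(kOldLengthOffset == JSArgumentsObject::kLengthOffset,
              "named offset must match the computed one");

int main() {
  std::printf("length offset = %d\n", JSArgumentsObject::kLengthOffset);
  return 0;
}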
 
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 612170e..4a2a857 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -8,9 +8,9 @@
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-info.h"
 #include "src/field-index-inl.h"
+#include "src/field-type.h"
 #include "src/objects-inl.h"  // TODO(mstarzinger): Temporary cycle breaker!
 #include "src/type-cache.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -232,6 +232,9 @@
   // Compute the receiver type.
   Handle<Map> receiver_map = map;
 
+  // Property lookups require the name to be internalized.
+  name = isolate()->factory()->InternalizeName(name);
+
   // We support fast inline cases for certain JSObject getters.
   if (access_mode == AccessMode::kLoad &&
       LookupSpecialFieldAccessor(map, name, access_info)) {
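
On the internalization step above: canonicalizing the name means later
descriptor searches can rely on identity comparison. A rough analogy
using std::unordered_set as a stand-in for the isolate's string table:

#include <cassert>
#include <string>
#include <unordered_set>

// insert() returns the one canonical copy of each distinct spelling,
// so equal strings internalize to the same address.
const std::string* Internalize(std::unordered_set<std::string>* table,
                               const std::string& name) {
  return &*table->insert(name).first;
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* a = Internalize(&table, "length");
  const std::string* b = Internalize(&table, std::string("len") + "gth");
  assert(a == b);  // identity comparison now suffices for lookups
  return 0;
}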
@@ -242,7 +245,7 @@
   do {
     // Lookup the named property on the {map}.
     Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
-    int const number = descriptors->SearchWithCache(*name, *map);
+    int const number = descriptors->SearchWithCache(isolate(), *name, *map);
     if (number != DescriptorArray::kNotFound) {
       PropertyDetails const details = descriptors->GetDetails(number);
       if (access_mode == AccessMode::kStore) {
@@ -277,8 +280,7 @@
           // Extract the field type from the property details (make sure its
           // representation is TaggedPointer to reflect the heap object case).
           field_type = Type::Intersect(
-              Type::Convert<HeapType>(
-                  handle(descriptors->GetFieldType(number), isolate()), zone()),
+              descriptors->GetFieldType(number)->Convert(zone()),
               Type::TaggedPointer(), zone());
           if (field_type->Is(Type::None())) {
             // Store is not safe if the field type was cleared.
@@ -454,10 +456,7 @@
       // Extract the field type from the property details (make sure its
       // representation is TaggedPointer to reflect the heap object case).
       field_type = Type::Intersect(
-          Type::Convert<HeapType>(
-              handle(
-                  transition_map->instance_descriptors()->GetFieldType(number),
-                  isolate()),
+          transition_map->instance_descriptors()->GetFieldType(number)->Convert(
               zone()),
           Type::TaggedPointer(), zone());
       if (field_type->Is(Type::None())) {
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 9b074b0..bdf4c47 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -206,6 +206,19 @@
       : OutOfLineCode(gen),
         object_(object),
         index_(index),
+        index_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode) {}
+
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        index_(no_reg),
+        index_immediate_(index),
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
@@ -215,24 +228,36 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Push(lr);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
-    __ add(scratch1_, object_, index_);
+                         remembered_set_action, save_fp_mode);
+    if (index_.is(no_reg)) {
+      __ add(scratch1_, object_, Operand(index_immediate_));
+    } else {
+      DCHECK_EQ(0, index_immediate_);
+      __ add(scratch1_, object_, Operand(index_));
+    }
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(lr);
+    }
   }
 
  private:
   Register const object_;
   Register const index_;
+  int32_t const index_immediate_;  // Valid if index_.is(no_reg).
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
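
The two constructors above overload OutOfLineRecordWrite on the index operand,
with no_reg acting as a sentinel so Generate() can tell the forms apart. A
minimal standalone sketch of that pattern (names and types here are
illustrative, not the V8 ones):

  #include <cstdint>

  struct Reg {
    int code;
    bool is(const Reg& other) const { return code == other.code; }
  };
  const Reg no_reg = {-1};

  class RecordWriteOOL {
   public:
    // Register-index form: the immediate slot stays 0.
    explicit RecordWriteOOL(Reg index) : index_(index), index_immediate_(0) {}
    // Immediate-index form: the register slot holds the sentinel.
    explicit RecordWriteOOL(int32_t index)
        : index_(no_reg), index_immediate_(index) {}
    bool uses_immediate() const { return index_.is(no_reg); }

   private:
    Reg index_;
    int32_t index_immediate_;  // Valid only if index_.is(no_reg).
  };
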
@@ -449,11 +474,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -514,6 +534,13 @@
       __ mov(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
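
The kArchParentFramePointer case added below relies on the standard fp chain;
a sketch of the two situations it distinguishes (frame layout assumed, not
taken from this patch):

  // Frame built: the prologue pushed the caller's fp, so the parent frame
  // pointer is the word at [fp, #0] and must be loaded:
  //   parent_fp = Memory[fp + 0];
  // Frame elided: fp was never rewritten, so it still is the caller's frame
  // pointer and a plain register move suffices:
  //   parent_fp = fp;
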
@@ -522,19 +549,43 @@
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
-      Register index = i.InputRegister(1);
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
-      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
-                                                   scratch0, scratch1, mode);
-      __ str(value, MemOperand(object, index));
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_Offset_RI) {
+        int32_t index = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+                                                scratch0, scratch1, mode);
+        __ str(value, MemOperand(object, index));
+      } else {
+        DCHECK_EQ(kMode_Offset_RR, addressing_mode);
+        Register index(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+                                                scratch0, scratch1, mode);
+        __ str(value, MemOperand(object, index));
+      }
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                        ool->entry());
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = sp;
+      } else {
+        base = fp;
+      }
+      __ add(i.OutputRegister(0), base, Operand(offset.offset()));
+      break;
+    }
     case kArmAdd:
       __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
@@ -622,6 +673,13 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmSxtb:
       __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -658,6 +716,12 @@
                i.InputInt32(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArmRbit: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ rbit(i.OutputRegister(), i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmClz:
       __ clz(i.OutputRegister(), i.InputRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -831,6 +895,20 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVcvtF32S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF32U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVcvtF64S32: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vmov(scratch, i.InputRegister(0));
@@ -845,6 +923,20 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmVcvtS32F32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
     case kArmVcvtS32F64: {
       SwVfpRegister scratch = kScratchDoubleReg.low();
       __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
@@ -1098,8 +1190,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 401100b..50fa555 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -36,6 +36,7 @@
   V(ArmMvn)                        \
   V(ArmBfc)                        \
   V(ArmUbfx)                       \
+  V(ArmSbfx)                       \
   V(ArmSxtb)                       \
   V(ArmSxth)                       \
   V(ArmSxtab)                      \
@@ -43,6 +44,7 @@
   V(ArmUxtb)                       \
   V(ArmUxth)                       \
   V(ArmUxtab)                      \
+  V(ArmRbit)                       \
   V(ArmUxtah)                      \
   V(ArmVcmpF32)                    \
   V(ArmVaddF32)                    \
@@ -76,8 +78,12 @@
   V(ArmVrintnF64)                  \
   V(ArmVcvtF32F64)                 \
   V(ArmVcvtF64F32)                 \
+  V(ArmVcvtF32S32)                 \
+  V(ArmVcvtF32U32)                 \
   V(ArmVcvtF64S32)                 \
   V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F32)                 \
+  V(ArmVcvtU32F32)                 \
   V(ArmVcvtS32F64)                 \
   V(ArmVcvtU32F64)                 \
   V(ArmVmovLowU32F64)              \
@@ -100,7 +106,6 @@
   V(ArmPush)                       \
   V(ArmPoke)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index f36802c..d950e8c 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -38,6 +38,7 @@
     case kArmMvn:
     case kArmBfc:
     case kArmUbfx:
+    case kArmSbfx:
     case kArmSxtb:
     case kArmSxth:
     case kArmSxtab:
@@ -46,6 +47,7 @@
     case kArmUxth:
     case kArmUxtab:
     case kArmUxtah:
+    case kArmRbit:
     case kArmVcmpF32:
     case kArmVaddF32:
     case kArmVsubF32:
@@ -78,8 +80,12 @@
     case kArmVrintnF64:
     case kArmVcvtF32F64:
     case kArmVcvtF64F32:
+    case kArmVcvtF32S32:
+    case kArmVcvtF32U32:
     case kArmVcvtF64S32:
     case kArmVcvtF64U32:
+    case kArmVcvtS32F32:
+    case kArmVcvtU32F32:
     case kArmVcvtS32F64:
     case kArmVcvtU32F64:
     case kArmVmovLowU32F64:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index f3deae7..14b30b1 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -327,8 +327,9 @@
     case MachineRepresentation::kWord32:
       opcode = kArmLdr;
       break;
-    case MachineRepresentation::kNone:  // Fall through.
-    case MachineRepresentation::kWord64:
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
   }
@@ -355,10 +356,19 @@
 
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(index);
+    // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
+      inputs[input_count++] = g.UseImmediate(index);
+      addressing_mode = kMode_Offset_RI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(index);
+      addressing_mode = kMode_Offset_RR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
@@ -380,6 +390,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
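
Both kArmAdd and kArmStr are queried above because the two immediate classes
are incomparable on ARM: a str offset is any 12-bit magnitude, while an add
immediate is an 8-bit value rotated by an even amount. A hedged standalone
sketch of the two predicates (helper names invented; ranges per the A32
encodings):

  #include <cstdint>

  // ldr/str immediate offsets: 12-bit magnitude plus a sign (U) bit.
  bool FitsArmStrOffset(int32_t imm) {
    return imm > -4096 && imm < 4096;   // e.g. 4095 -> true
  }

  // add/sub immediates: some even left-rotation of imm must fit in 8 bits.
  bool FitsArmAddImmediate(uint32_t imm) {
    for (int rot = 0; rot < 32; rot += 2) {
      uint32_t v = rot == 0 ? imm : (imm << rot) | (imm >> (32 - rot));
      if (v <= 0xff) return true;       // e.g. 4095 -> false, 0x1000 -> true
    }
    return false;
  }
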
@@ -402,8 +413,9 @@
       case MachineRepresentation::kWord32:
         opcode = kArmStr;
         break;
-      case MachineRepresentation::kNone:  // Fall through.
-      case MachineRepresentation::kWord64:
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
     }
@@ -442,9 +454,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -483,9 +496,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -551,43 +565,67 @@
   if (m.right().HasValue()) {
     uint32_t const value = m.right().Value();
     uint32_t width = base::bits::CountPopulation32(value);
-    uint32_t msb = base::bits::CountLeadingZeros32(value);
-    // Try to interpret this AND as UBFX.
-    if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
-      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
-      if (m.left().IsWord32Shr()) {
-        Int32BinopMatcher mleft(m.left().node());
-        if (mleft.right().IsInRange(0, 31)) {
-          // UBFX cannot extract bits past the register size, however since
-          // shifting the original value would have introduced some zeros we can
-          // still use UBFX with a smaller mask and the remaining bits will be
-          // zeros.
-          uint32_t const lsb = mleft.right().Value();
-          return EmitUbfx(this, node, mleft.left().node(), lsb,
-                          std::min(width, 32 - lsb));
+    uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
+
+    // Try to merge SHR operations on the left hand input into this AND.
+    if (m.left().IsWord32Shr()) {
+      Int32BinopMatcher mshr(m.left().node());
+      if (mshr.right().HasValue()) {
+        uint32_t const shift = mshr.right().Value();
+
+        if (((shift == 8) || (shift == 16) || (shift == 24)) &&
+            ((value == 0xff) || (value == 0xffff))) {
+          // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
+          // bytewise rotation.
+          Emit((value == 0xff) ? kArmUxtb : kArmUxth,
+               g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
+               g.TempImmediate(mshr.right().Value()));
+          return;
+        } else if (IsSupported(ARMv7) && (width != 0) &&
+                   ((leading_zeros + width) == 32)) {
+          // Merge SHR into AND by emitting a UBFX instruction.
+          DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+          if ((1 <= shift) && (shift <= 31)) {
+            // UBFX cannot extract bits past the register size; however, since
+            // shifting the original value would have introduced some zeros, we
+            // can still use UBFX with a smaller mask and the remaining bits
+            // will be zeros.
+            EmitUbfx(this, node, mshr.left().node(), shift,
+                     std::min(width, 32 - shift));
+            return;
+          }
         }
       }
-      return EmitUbfx(this, node, m.left().node(), 0, width);
+    } else if (value == 0xffff) {
+      // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
+      // better than AND 0xff for this operation.
+      Emit(kArmUxth, g.DefineAsRegister(m.node()),
+           g.UseRegister(m.left().node()), g.TempImmediate(0));
+      return;
     }
-    // Try to interpret this AND as BIC.
     if (g.CanBeImmediate(~value)) {
+      // Emit BIC for this AND by inverting the immediate value first.
       Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
            g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.TempImmediate(~value));
       return;
     }
-    // Try to interpret this AND as UXTH.
-    if (value == 0xffff) {
-      Emit(kArmUxth, g.DefineAsRegister(m.node()),
-           g.UseRegister(m.left().node()), g.TempImmediate(0));
-      return;
-    }
-    // Try to interpret this AND as BFC.
-    if (IsSupported(ARMv7)) {
+    if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
+      // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
+      // replace this AND with UBFX. Other contiguous bit patterns have already
+      // been handled by BIC or will be handled by AND.
+      if ((width != 0) && ((leading_zeros + width) == 32) &&
+          (9 <= leading_zeros) && (leading_zeros <= 23)) {
+        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+        EmitUbfx(this, node, m.left().node(), 0, width);
+        return;
+      }
+
       width = 32 - width;
-      msb = base::bits::CountLeadingZeros32(~value);
+      leading_zeros = base::bits::CountLeadingZeros32(~value);
       uint32_t lsb = base::bits::CountTrailingZeros32(~value);
-      if (msb + width + lsb == 32) {
+      if ((leading_zeros + width + lsb) == 32) {
+        // This AND can be replaced with BFC.
         Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
              g.TempImmediate(lsb), g.TempImmediate(width));
         return;
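
The bit-level identities this hunk depends on can be checked in isolation. A
small self-contained sketch (plain C++, independent of the selector code)
covering the UXTB-with-rotation merge and the contiguous-mask UBFX case:

  #include <cstdint>

  constexpr uint32_t RotateRight(uint32_t x, unsigned n) {  // n in 1..31
    return (x >> n) | (x << (32 - n));
  }

  // (x >> 8) & 0xff is exactly what UXTB computes with a byte rotation of
  // 8: the rotation parks bits 8..15 in the low byte before zero-extension.
  static_assert(((0xAABBCCDDu >> 8) & 0xffu) ==
                    (RotateRight(0xAABBCCDDu, 8) & 0xffu),
                "shr+and merges into uxtb with rotation");

  // x & 0x1fff keeps 13 contiguous bits starting at bit 0, i.e. exactly
  // UBFX(x, lsb=0, width=13); neither 0x1fff nor ~0x1fff is an encodable
  // ARM immediate, which is why the UBFX path pays off here.
  static_assert((0xAABBCCDDu & 0x1fffu) == 0x0cddu,
                "13-bit contiguous mask is ubfx lsb 0 width 13");
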
@@ -699,14 +737,23 @@
   Int32BinopMatcher m(node);
   if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().Is(16) && m.right().Is(16)) {
-      Emit(kArmSxth, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
-      return;
-    } else if (mleft.right().Is(24) && m.right().Is(24)) {
-      Emit(kArmSxtb, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
-      return;
+    if (m.right().HasValue() && mleft.right().HasValue()) {
+      uint32_t sar = m.right().Value();
+      uint32_t shl = mleft.right().Value();
+      if ((sar == shl) && (sar == 16)) {
+        Emit(kArmSxth, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+        return;
+      } else if ((sar == shl) && (sar == 24)) {
+        Emit(kArmSxtb, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+        return;
+      } else if (IsSupported(ARMv7) && (sar >= shl)) {
+        Emit(kArmSbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
+             g.TempImmediate(32 - sar));
+        return;
+      }
     }
   }
   VisitShift(this, node, TryMatchASR);
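
The new kArmSbfx case covers the Shl/Sar pair used for signed field
extraction. A worked instance of the index arithmetic above (a plain C++
mirror, assuming arithmetic right shift on int32_t):

  #include <cstdint>

  // With shl = 8 and sar = 20 the selector emits
  // SBFX(x, lsb = sar - shl = 12, width = 32 - sar = 12), i.e. bits 12..23
  // of x sign-extended from bit 23 -- the same value this computes:
  int32_t SignedExtractDemo(int32_t x) {
    return static_cast<int32_t>(static_cast<uint32_t>(x) << 8) >> 20;
  }
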
@@ -726,6 +773,12 @@
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+  DCHECK(IsSupported(ARMv7));
+  VisitRR(this, kArmRbit, node);
+}
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
 
@@ -921,6 +974,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kArmVcvtF32S32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kArmVcvtF32U32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kArmVcvtF64S32, node);
 }
@@ -931,6 +994,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kArmVcvtS32F32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kArmVcvtU32F32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRR(this, kArmVcvtS32F64, node);
 }
@@ -1591,6 +1664,9 @@
   MachineOperatorBuilder::Flags flags =
       MachineOperatorBuilder::kInt32DivIsSafe |
       MachineOperatorBuilder::kUint32DivIsSafe;
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    flags |= MachineOperatorBuilder::kWord32ReverseBits;
+  }
   if (CpuFeatures::IsSupported(ARMv8)) {
     flags |= MachineOperatorBuilder::kFloat32RoundDown |
              MachineOperatorBuilder::kFloat64RoundDown |
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index d356195..e45c677 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -270,7 +270,7 @@
 
 class OutOfLineRecordWrite final : public OutOfLineCode {
  public:
-  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
                        Register value, Register scratch0, Register scratch1,
                        RecordWriteMode mode)
       : OutOfLineCode(gen),
@@ -285,24 +285,30 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlagClear(value_, scratch0_,
-                            MemoryChunk::kPointersToHereAreInterestingMask,
-                            exit());
-    }
+    __ CheckPageFlagClear(value_, scratch0_,
+                          MemoryChunk::kPointersToHereAreInterestingMask,
+                          exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Push(lr);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Add(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(lr);
+    }
   }
 
  private:
   Register const object_;
-  Register const index_;
+  Operand const index_;
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
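
A distilled reading of the mode comparisons above (the enum ordering
kValueIsMap < kValueIsPointer < kValueIsAny is what they rely on; the
rationale for maps is an assumption, not spelled out in this patch):

  //   kValueIsMap:     maps live outside new space, so no remembered-set
  //                    entry is needed (OMIT_REMEMBERED_SET); only the
  //                    incremental-marking barrier still applies.
  //   kValueIsPointer: the heap object may be in new space, so the slot is
  //                    recorded (EMIT_REMEMBERED_SET).
  //   kValueIsAny:     as kValueIsPointer, plus the smi check up front,
  //                    because the value may not be a heap object at all.
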
@@ -488,7 +494,8 @@
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
-  switch (ArchOpcodeField::decode(opcode)) {
+  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+  switch (arch_opcode) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
@@ -499,6 +506,14 @@
         __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
         __ Call(target);
       }
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but that is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -530,6 +545,14 @@
       }
       __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(x10);
+      // TODO(titzer): this is ugly. JSSP should be a caller-save register
+      // in this case, but that is not possible to express in the register
+      // allocator.
+      CallDescriptor::Flags flags =
+          static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+      if (flags & CallDescriptor::kRestoreJSSP) {
+        __ mov(jssp, csp);
+      }
       frame_access_state()->ClearSPDelta();
       RecordCallPosition(instr);
       break;
@@ -551,11 +574,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction:
       // We don't need kArchPrepareCallCFunction on arm64 as the instruction
      // selector already performs a Claim to reserve space on the stack and
@@ -609,14 +627,29 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
     case kArchStoreWithWriteBarrier: {
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
       Register object = i.InputRegister(0);
-      Register index = i.InputRegister(1);
+      Operand index(0);
+      if (addressing_mode == kMode_MRI) {
+        index = Operand(i.InputInt64(1));
+      } else {
+        DCHECK_EQ(addressing_mode, kMode_MRR);
+        index = Operand(i.InputRegister(1));
+      }
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
@@ -629,6 +662,18 @@
       __ Bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = __ StackPointer();
+      } else {
+        base = fp;
+      }
+      __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
+      break;
+    }
     case kArm64Float32RoundDown:
       __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
@@ -885,18 +930,41 @@
     case kArm64CompareAndBranch32:
       // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
-    case kArm64ClaimForCallArguments: {
-      __ Claim(i.InputInt32(0));
-      frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
+    case kArm64ClaimCSP: {
+      int count = i.InputInt32(0);
+      Register prev = __ StackPointer();
+      __ SetStackPointer(csp);
+      __ Claim(count);
+      __ SetStackPointer(prev);
+      frame_access_state()->IncreaseSPDelta(count);
       break;
     }
-    case kArm64Poke: {
+    case kArm64ClaimJSSP: {
+      int count = i.InputInt32(0);
+      if (csp.Is(__ StackPointer())) {
+        // No JSSP is set up. Compute it from the CSP.
+        int even = RoundUp(count, 2);
+        __ Sub(jssp, csp, count * kPointerSize);
+        __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
+        frame_access_state()->IncreaseSPDelta(even);
+      } else {
+        // JSSP is the current stack pointer; just use regular Claim().
+        __ Claim(count);
+        frame_access_state()->IncreaseSPDelta(count);
+      }
+      break;
+    }
+    case kArm64PokeCSP:  // fall through
+    case kArm64PokeJSSP: {
+      Register prev = __ StackPointer();
+      __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
       Operand operand(i.InputInt32(1) * kPointerSize);
       if (instr->InputAt(0)->IsDoubleRegister()) {
         __ Poke(i.InputFloat64Register(0), operand);
       } else {
         __ Poke(i.InputRegister(0), operand);
       }
+      __ SetStackPointer(prev);
       break;
     }
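
Worked numbers for the kArm64ClaimJSSP path above when csp is still the
current stack pointer (assuming kPointerSize == 8):

  //   count = 3                        // slots requested
  //   even  = RoundUp(3, 2) = 4        // csp must move in 16-byte steps
  //   jssp  = csp - 3 * 8 = csp - 24   // exactly the claimed JS slots
  //   csp   = csp - 4 * 8 = csp - 32   // padding slot keeps csp aligned
  //   IncreaseSPDelta(4)               // the delta tracks csp, not jssp
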
     case kArm64PokePair: {
@@ -916,6 +984,12 @@
     case kArm64Clz32:
       __ Clz(i.OutputRegister32(), i.InputRegister32(0));
       break;
+    case kArm64Rbit:
+      __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
+      break;
+    case kArm64Rbit32:
+      __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
+      break;
     case kArm64Cmp:
       __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
       break;
@@ -1042,9 +1116,15 @@
     case kArm64Float64ToFloat32:
       __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float32ToInt32:
+      __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
+      break;
     case kArm64Float64ToInt32:
       __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float32ToUint32:
+      __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
+      break;
     case kArm64Float64ToUint32:
       __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
       break;
@@ -1093,6 +1173,9 @@
         __ Cset(i.OutputRegister(1), ne);
       }
       break;
+    case kArm64Int32ToFloat32:
+      __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+      break;
     case kArm64Int32ToFloat64:
       __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
       break;
@@ -1102,6 +1185,9 @@
     case kArm64Int64ToFloat64:
       __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
       break;
+    case kArm64Uint32ToFloat32:
+      __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+      break;
     case kArm64Uint32ToFloat64:
       __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
       break;
@@ -1376,8 +1462,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
@@ -1445,13 +1529,14 @@
       __ Bind(&return_label_);
       if (descriptor->UseNativeStack()) {
         __ Mov(csp, fp);
+        pop_count += (pop_count & 1);  // align
       } else {
         __ Mov(jssp, fp);
       }
       __ Pop(fp, lr);
     }
   } else if (descriptor->UseNativeStack()) {
-    pop_count += (pop_count & 1);
+    pop_count += (pop_count & 1);  // align
   }
   __ Drop(pop_count);
   __ Ret();
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index ef33348..f03c2fb 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -73,11 +73,15 @@
   V(Arm64Ubfx32)                   \
   V(Arm64Ubfiz32)                  \
   V(Arm64Bfi)                      \
+  V(Arm64Rbit)                     \
+  V(Arm64Rbit32)                   \
   V(Arm64TestAndBranch32)          \
   V(Arm64TestAndBranch)            \
   V(Arm64CompareAndBranch32)       \
-  V(Arm64ClaimForCallArguments)    \
-  V(Arm64Poke)                     \
+  V(Arm64ClaimCSP)                 \
+  V(Arm64ClaimJSSP)                \
+  V(Arm64PokeCSP)                  \
+  V(Arm64PokeJSSP)                 \
   V(Arm64PokePair)                 \
   V(Arm64Float32Cmp)               \
   V(Arm64Float32Add)               \
@@ -110,15 +114,19 @@
   V(Arm64Float64RoundTiesEven)     \
   V(Arm64Float32ToFloat64)         \
   V(Arm64Float64ToFloat32)         \
+  V(Arm64Float32ToInt32)           \
   V(Arm64Float64ToInt32)           \
+  V(Arm64Float32ToUint32)          \
   V(Arm64Float64ToUint32)          \
   V(Arm64Float32ToInt64)           \
   V(Arm64Float64ToInt64)           \
   V(Arm64Float32ToUint64)          \
   V(Arm64Float64ToUint64)          \
+  V(Arm64Int32ToFloat32)           \
   V(Arm64Int32ToFloat64)           \
   V(Arm64Int64ToFloat32)           \
   V(Arm64Int64ToFloat64)           \
+  V(Arm64Uint32ToFloat32)          \
   V(Arm64Uint32ToFloat64)          \
   V(Arm64Uint64ToFloat32)          \
   V(Arm64Uint64ToFloat64)          \
@@ -143,7 +151,6 @@
   V(Arm64Ldr)                      \
   V(Arm64Str)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
@@ -169,6 +176,8 @@
   V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */       \
   V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */
 
+enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index eb358dd..ca37299 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -75,6 +75,8 @@
     case kArm64Ubfx32:
     case kArm64Ubfiz32:
     case kArm64Bfi:
+    case kArm64Rbit:
+    case kArm64Rbit32:
     case kArm64Float32Cmp:
     case kArm64Float32Add:
     case kArm64Float32Sub:
@@ -106,15 +108,19 @@
     case kArm64Float32RoundUp:
     case kArm64Float32ToFloat64:
     case kArm64Float64ToFloat32:
+    case kArm64Float32ToInt32:
     case kArm64Float64ToInt32:
+    case kArm64Float32ToUint32:
     case kArm64Float64ToUint32:
     case kArm64Float32ToInt64:
     case kArm64Float64ToInt64:
     case kArm64Float32ToUint64:
     case kArm64Float64ToUint64:
+    case kArm64Int32ToFloat32:
     case kArm64Int32ToFloat64:
     case kArm64Int64ToFloat32:
     case kArm64Int64ToFloat64:
+    case kArm64Uint32ToFloat32:
     case kArm64Uint32ToFloat64:
     case kArm64Uint64ToFloat32:
     case kArm64Uint64ToFloat64:
@@ -141,8 +147,10 @@
     case kArm64Ldr:
       return kIsLoadOperation;
 
-    case kArm64ClaimForCallArguments:
-    case kArm64Poke:
+    case kArm64ClaimCSP:
+    case kArm64ClaimJSSP:
+    case kArm64PokeCSP:
+    case kArm64PokeJSSP:
     case kArm64PokePair:
     case kArm64StrS:
     case kArm64StrD:
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 1ec5ab4..26a2896 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -371,6 +371,7 @@
       opcode = kArm64Ldr;
       immediate_mode = kLoadStoreImm64;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -398,10 +399,20 @@
   // TODO(arm64): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(index);
+    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
+    // must check kArithmeticImm as well as kLoadStoreImm64.
+    if (g.CanBeImmediate(index, kArithmeticImm) &&
+        g.CanBeImmediate(index, kLoadStoreImm64)) {
+      inputs[input_count++] = g.UseImmediate(index);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(index);
+      addressing_mode = kMode_MRR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
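
As on ARM, the two immediate classes tested here are incomparable, so neither
check subsumes the other. A simplified standalone sketch (unscaled 9-bit LDUR
offsets ignored; helper names invented):

  #include <cstdint>

  // add/sub immediates: a 12-bit value, optionally shifted left by 12.
  bool FitsArm64ArithmeticImm(uint64_t imm) {
    return imm <= 0xfff || ((imm & 0xfff) == 0 && (imm >> 12) <= 0xfff);
  }

  // 64-bit ldr/str offsets: a 12-bit value scaled by the access size (8).
  bool FitsArm64LoadStoreImm64(uint64_t imm) {
    return (imm % 8) == 0 && (imm / 8) <= 0xfff;
  }

  // 32760 (= 4095 * 8) passes only the load/store check, while 0x10000
  // (= 0x10 << 12) passes only the arithmetic one.
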
@@ -423,6 +434,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
@@ -455,6 +467,7 @@
         opcode = kArm64Str;
         immediate_mode = kLoadStoreImm64;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -496,8 +509,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -534,8 +548,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -963,6 +978,16 @@
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+  VisitRR(this, kArm64Rbit32, node);
+}
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+  VisitRR(this, kArm64Rbit, node);
+}
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
 
@@ -1219,6 +1244,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kArm64Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kArm64Uint32ToFloat32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kArm64Int32ToFloat64, node);
 }
@@ -1229,11 +1264,21 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kArm64Float32ToInt32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRR(this, kArm64Float64ToInt32, node);
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kArm64Float32ToUint32, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
   VisitRR(this, kArm64Float64ToUint32, node);
 }
@@ -1583,30 +1628,27 @@
     Node* node) {
   Arm64OperandGenerator g(this);
 
-  // Push the arguments to the stack.
-  int aligned_push_count = static_cast<int>(arguments->size());
+  bool to_native_stack = descriptor->UseNativeStack();
 
-  bool pushed_count_uneven = aligned_push_count & 1;
-  int claim_count = aligned_push_count;
-  if (pushed_count_uneven && descriptor->UseNativeStack()) {
-    // We can only claim for an even number of call arguments when we use the
-    // native stack.
-    claim_count++;
+  int claim_count = static_cast<int>(arguments->size());
+  int slot = claim_count - 1;
+  if (to_native_stack) {
+    // Native stack must always be aligned to 16 (2 words).
+    claim_count = RoundUp(claim_count, 2);
   }
-  // TODO(dcarney): claim and poke probably take small immediates,
-  //                loop here or whatever.
+  // TODO(titzer): claim and poke probably take small immediates.
   // Bump the stack pointer(s).
-  if (aligned_push_count > 0) {
-    // TODO(dcarney): it would be better to bump the csp here only
+  if (claim_count > 0) {
+    // TODO(titzer): it would be better to bump the csp here only
     // and emit paired stores with increment for non-C frames.
-    Emit(kArm64ClaimForCallArguments, g.NoOutput(),
-         g.TempImmediate(claim_count));
+    ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+    Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
   }
 
-  // Move arguments to the stack.
-  int slot = aligned_push_count - 1;
+  // Poke the arguments into the stack.
+  ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
   while (slot >= 0) {
-    Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+    Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
          g.TempImmediate(slot));
     slot--;
     // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
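
Worked example for the claim/poke sequence above when targeting the native
stack:

  //   arguments->size() == 5
  //   slot        = 5 - 1 = 4          // highest slot is poked first
  //   claim_count = RoundUp(5, 2) = 6  // 48 bytes, csp stays 16-aligned
  //   kArm64ClaimCSP 6, then kArm64PokeCSP into slots 4, 3, 2, 1, 0;
  //   slot 5 is alignment padding and is never written.
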
@@ -2191,7 +2233,9 @@
          MachineOperatorBuilder::kFloat64RoundTiesEven |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
-         MachineOperatorBuilder::kUint32DivIsSafe;
+         MachineOperatorBuilder::kUint32DivIsSafe |
+         MachineOperatorBuilder::kWord32ReverseBits |
+         MachineOperatorBuilder::kWord64ReverseBits;
 }
 
 }  // namespace compiler
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index c70dfbf..abcf828 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -206,7 +206,6 @@
   int stack_height_;
 };
 
-
 // Helper class for a try-finally control scope. It can record intercepted
 // control-flow commands that cause entry into a finally-block, and re-apply
 // them after leaving that block again. Special tokens are used to identify
@@ -214,7 +213,10 @@
 class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
  public:
   explicit DeferredCommands(AstGraphBuilder* owner)
-      : owner_(owner), deferred_(owner->local_zone()) {}
+      : owner_(owner),
+        deferred_(owner->local_zone()),
+        return_token_(nullptr),
+        throw_token_(nullptr) {}
 
   // One recorded control-flow command.
   struct Entry {
@@ -226,7 +228,24 @@
   // Records a control-flow command while entering the finally-block. This also
   // generates a new dispatch token that identifies one particular path.
   Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
-    Node* token = NewPathTokenForDeferredCommand();
+    Node* token = nullptr;
+    switch (cmd) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+        token = NewPathToken(dispenser_.GetBreakContinueToken());
+        break;
+      case CMD_THROW:
+        if (throw_token_) return throw_token_;
+        token = NewPathToken(TokenDispenserForFinally::kThrowToken);
+        throw_token_ = token;
+        break;
+      case CMD_RETURN:
+        if (return_token_) return return_token_;
+        token = NewPathToken(TokenDispenserForFinally::kReturnToken);
+        return_token_ = token;
+        break;
+    }
+    DCHECK_NOT_NULL(token);
     deferred_.push_back({cmd, stmt, token});
     return token;
   }
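
A compact sketch of the token scheme RecordCommand now uses: throw and return
each share a single cached token, while every break/continue record draws a
fresh one (the constant values below are illustrative, not taken from the
real TokenDispenserForFinally):

  class TokenDispenserSketch {
   public:
    static const int kFallThroughToken = 0;
    static const int kThrowToken = 1;
    static const int kReturnToken = 2;
    // Break/continue targets are all distinct, so each gets a new id.
    int GetBreakContinueToken() { return next_token_++; }

   private:
    int next_token_ = 3;  // First id past the reserved singletons.
  };
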
@@ -255,11 +274,11 @@
   }
 
  protected:
-  Node* NewPathTokenForDeferredCommand() {
-    return owner_->jsgraph()->Constant(static_cast<int>(deferred_.size()));
+  Node* NewPathToken(int token_id) {
+    return owner_->jsgraph()->Constant(token_id);
   }
   Node* NewPathTokenForImplicitFallThrough() {
-    return owner_->jsgraph()->Constant(-1);
+    return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
   }
   Node* NewPathDispatchCondition(Node* t1, Node* t2) {
     // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
@@ -268,8 +287,11 @@
   }
 
  private:
+  TokenDispenserForFinally dispenser_;
   AstGraphBuilder* owner_;
   ZoneVector<Entry> deferred_;
+  Node* return_token_;
+  Node* throw_token_;
 };
 
 
@@ -409,10 +431,13 @@
       DCHECK_EQ(IrOpcode::kDead,
                 NodeProperties::GetFrameStateInput(node, 0)->opcode());
 
+      bool node_has_exception = NodeProperties::IsExceptionalCall(node);
+
       Node* frame_state_after =
           id_after == BailoutId::None()
               ? builder_->jsgraph()->EmptyFrameState()
-              : builder_->environment()->Checkpoint(id_after, combine);
+              : builder_->environment()->Checkpoint(id_after, combine,
+                                                    node_has_exception);
 
       NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
     }
@@ -455,8 +480,7 @@
                          local_zone),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
-          info->scope()->num_stack_slots(), info->shared_info(),
-          CALL_MAINTAINS_NATIVE_CONTEXT)) {
+          info->scope()->num_stack_slots(), info->shared_info())) {
   InitializeAstVisitor(info->isolate());
 }
 
@@ -589,7 +613,7 @@
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
-    NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
+    NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
   }
 
   // Visit illegal re-declaration and bail out if it exists.
@@ -610,13 +634,6 @@
   // Visit statements in the function body.
   VisitStatements(info()->literal()->body());
 
-  // Emit tracing call if requested to do so.
-  if (FLAG_trace) {
-    // TODO(mstarzinger): Only traces implicit return.
-    Node* return_value = jsgraph()->UndefinedConstant();
-    NewNode(javascript()->CallRuntime(Runtime::kTraceExit, 1), return_value);
-  }
-
   // Return 'undefined' in case we can fall off the end.
   BuildReturn(jsgraph()->UndefinedConstant());
 }
@@ -854,9 +871,9 @@
       env_values, static_cast<size_t>(count));
 }
 
-
-Node* AstGraphBuilder::Environment::Checkpoint(
-    BailoutId ast_id, OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
+                                               OutputFrameStateCombine combine,
+                                               bool owner_has_exception) {
   if (!builder()->info()->is_deoptimization_enabled()) {
     return builder()->jsgraph()->EmptyFrameState();
   }
@@ -876,7 +893,15 @@
 
   DCHECK(IsLivenessBlockConsistent());
   if (liveness_block() != nullptr) {
-    liveness_block()->Checkpoint(result);
+    // If the owning node has an exception, register the checkpoint with the
+    // predecessor so that the checkpoint is used for both the normal and the
+    // exceptional paths. Yes, this is a terrible hack and we might want
+    // to use an explicit frame state for the exceptional path.
+    if (owner_has_exception) {
+      liveness_block()->GetPredecessor()->Checkpoint(result);
+    } else {
+      liveness_block()->Checkpoint(result);
+    }
   }
   return result;
 }
@@ -1331,7 +1356,8 @@
 
     // Prepare for-in cache.
     Node* prepare = NewNode(javascript()->ForInPrepare(), object);
-    PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push());
+    PrepareFrameState(prepare, stmt->PrepareId(),
+                      OutputFrameStateCombine::Push(3));
     Node* cache_type = NewNode(common()->Projection(0), prepare);
     Node* cache_array = NewNode(common()->Projection(1), prepare);
     Node* cache_length = NewNode(common()->Projection(2), prepare);
@@ -1422,14 +1448,6 @@
   }
   try_control.EndTry();
 
-  // Insert lazy bailout point.
-  // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
-  // point. Ideally, we would not re-enter optimized code when deoptimized
-  // lazily. Tracked by issue v8:4195.
-  NewNode(common()->LazyBailout(),
-          jsgraph()->ZeroConstant(),                      // dummy target.
-          environment()->Checkpoint(stmt->HandlerId()));  // frame state.
-
   // Clear message object as we enter the catch block.
   Node* the_hole = jsgraph()->TheHoleConstant();
   NewNode(javascript()->StoreMessage(), the_hole);
@@ -1474,14 +1492,6 @@
   }
   try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
 
-  // Insert lazy bailout point.
-  // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
-  // point. Ideally, we would not re-enter optimized code when deoptimized
-  // lazily. Tracked by issue v8:4195.
-  NewNode(common()->LazyBailout(),
-          jsgraph()->ZeroConstant(),                      // dummy target.
-          environment()->Checkpoint(stmt->HandlerId()));  // frame state.
-
   // The result value semantics depend on how the block was entered:
   //  - ReturnStatement: It represents the return value being returned.
   //  - ThrowStatement: It represents the exception being thrown.
@@ -1493,7 +1503,7 @@
   // The result value, dispatch token and message is expected on the operand
   // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
   Node* message = NewNode(javascript()->LoadMessage());
-  environment()->Push(token);  // TODO(mstarzinger): Cook token!
+  environment()->Push(token);
   environment()->Push(result);
   environment()->Push(message);
 
@@ -1509,20 +1519,17 @@
   // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
   message = environment()->Pop();
   result = environment()->Pop();
-  token = environment()->Pop();  // TODO(mstarzinger): Uncook token!
+  token = environment()->Pop();
   NewNode(javascript()->StoreMessage(), message);
 
   // Dynamic dispatch after the finally-block.
   commands->ApplyDeferredCommands(token, result);
-
-  // TODO(mstarzinger): Remove bailout once everything works.
-  if (!FLAG_turbo_try_finally) SetStackOverflow();
 }
 
 
 void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
   Node* node =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement, 0));
+      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
   PrepareFrameState(node, stmt->DebugBreakId());
   environment()->MarkAllLocalsLive();
 }
@@ -1557,33 +1564,27 @@
 
 
 void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
-  Node* class_name = expr->raw_name() ? jsgraph()->Constant(expr->name())
-                                      : jsgraph()->UndefinedConstant();
-
-  // The class name is expected on the operand stack.
-  environment()->Push(class_name);
   VisitForValueOrTheHole(expr->extends());
   VisitForValue(expr->constructor());
 
   // Create node to instantiate a new class.
   Node* constructor = environment()->Pop();
   Node* extends = environment()->Pop();
-  Node* name = environment()->Pop();
   Node* start = jsgraph()->Constant(expr->start_position());
   Node* end = jsgraph()->Constant(expr->end_position());
-  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 5);
-  Node* literal = NewNode(opc, name, extends, constructor, start, end);
+  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
+  Node* literal = NewNode(opc, extends, constructor, start, end);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
-
-  // The prototype is ensured to exist by Runtime_DefineClass. No access check
-  // is needed here since the constructor is created by the class literal.
-  Node* prototype =
-      BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
-
-  // The class literal and the prototype are both expected on the operand stack
-  // during evaluation of the method values.
   environment()->Push(literal);
+
+  // Load the "prototype" from the constructor.
+  FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+  Handle<Name> name = isolate()->factory()->prototype_string();
+  VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
+  Node* prototype = BuildNamedLoad(literal, name, pair);
+  states.AddToNode(prototype, expr->PrototypeId(),
+                   OutputFrameStateCombine::Push());
   environment()->Push(prototype);
 
   // Create nodes to store method values into the literal.
@@ -1618,9 +1619,12 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED: {
+        Node* attr = jsgraph()->Constant(DONT_ENUM);
+        Node* set_function_name =
+            jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineClassMethod, 3);
-        NewNode(op, receiver, key, value);
+            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+        NewNode(op, receiver, key, value, attr, set_function_name);
         break;
       }
       case ObjectLiteral::Property::GETTER: {
@@ -1645,7 +1649,7 @@
   prototype = environment()->Pop();
   literal = environment()->Pop();
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+      javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
   literal = NewNode(op, literal, prototype);
 
   // Assign to class variable.
@@ -1774,8 +1778,7 @@
         Node* receiver = environment()->Pop();
         if (property->emit_store()) {
           Node* language = jsgraph()->Constant(SLOPPY);
-          const Operator* op =
-              javascript()->CallRuntime(Runtime::kSetProperty, 4);
+          const Operator* op = javascript()->CallRuntime(Runtime::kSetProperty);
           Node* set_property = NewNode(op, receiver, key, value, language);
           // SetProperty should not lazy deopt on an object literal.
           PrepareFrameState(set_property, BailoutId::None());
@@ -1790,7 +1793,7 @@
         Node* receiver = environment()->Pop();
         DCHECK(property->emit_store());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+            javascript()->CallRuntime(Runtime::kInternalSetPrototype);
         Node* set_prototype = NewNode(op, receiver, value);
         // SetPrototype should not lazy deopt on an object literal.
         PrepareFrameState(set_prototype,
@@ -1823,7 +1826,7 @@
     Node* name = environment()->Pop();
     Node* attr = jsgraph()->Constant(NONE);
     const Operator* op =
-        javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+        javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
     Node* call = NewNode(op, literal, name, getter, setter, attr);
     // This should not lazy deopt on a new literal.
     PrepareFrameState(call, BailoutId::None());
@@ -1847,7 +1850,7 @@
       Node* value = environment()->Pop();
       Node* receiver = environment()->Pop();
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+          javascript()->CallRuntime(Runtime::kInternalSetPrototype);
       Node* call = NewNode(op, receiver, value);
       PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
       continue;
@@ -1868,10 +1871,11 @@
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
         Node* attr = jsgraph()->Constant(NONE);
+        Node* set_function_name =
+            jsgraph()->Constant(property->NeedsSetFunctionName());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
+            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+        NewNode(op, receiver, key, value, attr, set_function_name);
         break;
       }
       case ObjectLiteral::Property::PROTOTYPE:
@@ -1899,8 +1903,7 @@
   // Transform literals that contain functions to fast properties.
   literal = environment()->Top();  // Reload from operand stack.
   if (expr->has_function()) {
-    const Operator* op =
-        javascript()->CallRuntime(Runtime::kToFastProperties, 1);
+    const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
     NewNode(op, literal);
   }
 
@@ -1939,7 +1942,7 @@
   int array_index = 0;
   for (; array_index < expr->values()->length(); array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     VisitForValue(subexpr);
@@ -1962,30 +1965,17 @@
   // number of elements an iterable produces is unknown ahead of time.
   for (; array_index < expr->values()->length(); array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
-    Node* result;
+    DCHECK(!subexpr->IsSpread());
 
-    if (subexpr->IsSpread()) {
-      VisitForValue(subexpr->AsSpread()->expression());
-      FrameStateBeforeAndAfter states(this,
-                                      subexpr->AsSpread()->expression()->id());
-      Node* iterable = environment()->Pop();
-      Node* array = environment()->Pop();
-      Node* function = BuildLoadNativeContextField(
-          Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
-      result = NewNode(javascript()->CallFunction(3, language_mode()), function,
-                       array, iterable);
-      states.AddToNode(result, expr->GetIdForElement(array_index));
-    } else {
-      VisitForValue(subexpr);
+    VisitForValue(subexpr);
+    {
       Node* value = environment()->Pop();
       Node* array = environment()->Pop();
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kAppendElement, 2);
-      result = NewNode(op, array, value);
+      const Operator* op = javascript()->CallRuntime(Runtime::kAppendElement);
+      Node* result = NewNode(op, array, value);
       PrepareFrameState(result, expr->GetIdForElement(array_index));
+      environment()->Push(result);
     }
-
-    environment()->Push(result);
   }
 
   ast_context()->ProduceValue(environment()->Pop());
@@ -2343,8 +2333,8 @@
       DCHECK(variable->location() == VariableLocation::LOOKUP);
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
-      Node* pair = NewNode(op, current_context(), name);
+          javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+      Node* pair = NewNode(op, name);
       callee_value = NewNode(common()->Projection(0), pair);
       receiver_value = NewNode(common()->Projection(1), pair);
       PrepareFrameState(pair, expr->LookupId(),
@@ -2439,8 +2429,8 @@
         Variable* variable = callee->AsVariableProxy()->var();
         Node* name = jsgraph()->Constant(variable->name());
         const Operator* op =
-            javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
-        Node* pair = NewNode(op, current_context(), name);
+            javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+        Node* pair = NewNode(op, name);
         callee_value = NewNode(common()->Projection(0), pair);
         receiver_value = NewNode(common()->Projection(1), pair);
         PrepareFrameState(pair, expr->LookupId(),
@@ -2480,7 +2470,7 @@
     Node* language = jsgraph()->Constant(language_mode());
     Node* position = jsgraph()->Constant(current_scope()->start_position());
     const Operator* op =
-        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
     Node* new_callee =
         NewNode(op, callee, source, function, language, position);
     PrepareFrameState(new_callee, expr->EvalId(),
@@ -2493,7 +2483,7 @@
   // Create node to perform the function call.
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call = javascript()->CallFunction(
-      args->length() + 2, language_mode(), feedback, receiver_hint);
+      args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   environment()->Push(value->InputAt(0));  // The callee passed to the call.
@@ -2571,8 +2561,7 @@
   VisitForValues(args);
 
   // Create node to perform the JS runtime call.
-  const Operator* call =
-      javascript()->CallFunction(args->length() + 2, language_mode());
+  const Operator* call = javascript()->CallFunction(args->length() + 2);
   FrameStateBeforeAndAfter states(this, expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
@@ -2591,6 +2580,7 @@
 
   // TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
   if (function->function_id == Runtime::kInlineGeneratorNext ||
+      function->function_id == Runtime::kInlineGeneratorReturn ||
       function->function_id == Runtime::kInlineGeneratorThrow) {
     ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
     return SetStackOverflow();
@@ -2740,7 +2730,7 @@
     // TODO(bmeurer): Cleanup this feedback/bailout mess!
     FrameStateBeforeAndAfter states(this, BailoutId::None());
     value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
-                          expr->binary_op(), TypeFeedbackId::None());
+                          expr->binary_op(), expr->CountBinOpFeedbackId());
    // This should never deoptimize outside strong mode, since otherwise the
    // value has already been converted to a number.
     states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
@@ -2848,16 +2838,16 @@
       op = javascript()->StrictNotEqual();
       break;
     case Token::LT:
-      op = javascript()->LessThan(language_mode());
+      op = javascript()->LessThan();
       break;
     case Token::GT:
-      op = javascript()->GreaterThan(language_mode());
+      op = javascript()->GreaterThan();
       break;
     case Token::LTE:
-      op = javascript()->LessThanOrEqual(language_mode());
+      op = javascript()->LessThanOrEqual();
       break;
     case Token::GTE:
-      op = javascript()->GreaterThanOrEqual(language_mode());
+      op = javascript()->GreaterThanOrEqual();
       break;
     case Token::INSTANCEOF:
       op = javascript()->InstanceOf();
@@ -2930,7 +2920,7 @@
                       DeclareGlobalsLanguageMode::encode(language_mode());
   Node* flags = jsgraph()->Constant(encoded_flags);
   Node* pairs = jsgraph()->Constant(data);
-  const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 2);
+  const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
   Node* call = NewNode(op, pairs, flags);
   PrepareFrameState(call, BailoutId::Declarations());
   globals()->clear();
@@ -3072,8 +3062,7 @@
 }
 
 
-void AstGraphBuilder::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
   Visit(node->expression());
 }
 
@@ -3209,11 +3198,11 @@
   if (arguments == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
-  CreateArgumentsParameters::Type type =
+  CreateArgumentsType type =
       is_strict(language_mode()) || !info()->has_simple_parameters()
-          ? CreateArgumentsParameters::kUnmappedArguments
-          : CreateArgumentsParameters::kMappedArguments;
-  const Operator* op = javascript()->CreateArguments(type, 0);
+          ? CreateArgumentsType::kUnmappedArguments
+          : CreateArgumentsType::kMappedArguments;
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   PrepareFrameState(object, BailoutId::None());
 
@@ -3231,8 +3220,8 @@
   if (rest == nullptr) return nullptr;
 
   // Allocate and initialize a new arguments object.
-  CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
-  const Operator* op = javascript()->CreateArguments(type, index);
+  CreateArgumentsType type = CreateArgumentsType::kRestParameter;
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   PrepareFrameState(object, BailoutId::None());
 
@@ -3405,8 +3394,7 @@
                                      feedback, combine, typeof_mode)) {
         return node;
       }
-      const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-      Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+      Node* value = BuildDynamicLoad(name, typeof_mode);
       states.AddToNode(value, bailout_id, combine);
       return value;
     }
@@ -3440,8 +3428,8 @@
       // Dynamic lookup of context variable (anywhere in the chain).
       Node* name = jsgraph()->Constant(variable->name());
       const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
-      Node* result = NewNode(op, current_context(), name);
+          javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
+      Node* result = NewNode(op, name);
       PrepareFrameState(result, bailout_id, combine);
       return result;
     }
@@ -3563,13 +3551,10 @@
     }
     case VariableLocation::LOOKUP: {
       // Dynamic lookup of context variable (anywhere in the chain).
-      Node* name = jsgraph()->Constant(variable->name());
-      Node* language = jsgraph()->Constant(language_mode());
+      Handle<Name> name = variable->name();
       // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
       // initializations of const declarations.
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
-      Node* store = NewNode(op, value, current_context(), name, language);
+      Node* store = BuildDynamicStore(name, value);
       PrepareFrameState(store, bailout_id, combine);
       return store;
     }
@@ -3581,16 +3566,16 @@
 
 Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
                                       const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadProperty(feedback);
+  Node* node = NewNode(op, object, key, GetFunctionClosure());
   return node;
 }
 
 
 Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
                                       const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadNamed(name, feedback);
+  Node* node = NewNode(op, object, GetFunctionClosure());
   return node;
 }
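
Note the second input change in these builders: the explicit feedback-vector node gives way to the function closure (BuildLoadFeedbackVector itself is deleted further down), so the vector is presumably recovered from the closure during lowering. The new input convention, sketched with the operators above:

    // The closure is now the trailing value input on vector-IC operators,
    // replacing the node previously produced by BuildLoadFeedbackVector().
    const Operator* op = javascript()->LoadNamed(name, feedback);
    Node* load = NewNode(op, object, GetFunctionClosure());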
 
@@ -3598,7 +3583,7 @@
 Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
                                        const VectorSlotPair& feedback) {
   const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
   return node;
 }
 
@@ -3608,7 +3593,7 @@
                                        const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, object, value, GetFunctionClosure());
   return node;
 }
 
@@ -3617,9 +3602,8 @@
                                            Handle<Name> name,
                                            const VectorSlotPair& feedback) {
   Node* name_node = jsgraph()->Constant(name);
-  Node* language = jsgraph()->Constant(language_mode());
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
-  Node* node = NewNode(op, receiver, home_object, name_node, language);
+  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
+  Node* node = NewNode(op, receiver, home_object, name_node);
   return node;
 }
 
@@ -3627,10 +3611,8 @@
 Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
                                            Node* key,
                                            const VectorSlotPair& feedback) {
-  Node* language = jsgraph()->Constant(language_mode());
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
-  Node* node = NewNode(op, receiver, home_object, key, language);
+  const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
+  Node* node = NewNode(op, receiver, home_object, key);
   return node;
 }
 
@@ -3662,7 +3644,7 @@
                                        const VectorSlotPair& feedback,
                                        TypeofMode typeof_mode) {
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, GetFunctionClosure());
   return node;
 }
 
@@ -3671,22 +3653,30 @@
                                         const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, value, GetFunctionClosure());
   return node;
 }
 
 
-Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
-  return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
-                 jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
+                                        TypeofMode typeof_mode) {
+  Node* name_node = jsgraph()->Constant(name);
+  const Operator* op =
+      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                    ? Runtime::kLoadLookupSlot
+                                    : Runtime::kLoadLookupSlotInsideTypeof);
+  Node* node = NewNode(op, name_node);
+  return node;
 }
 
 
-Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
-  return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
-                          object,
-                          jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
-                          graph()->start(), graph()->start());
+Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
+  Node* name_node = jsgraph()->Constant(name);
+  const Operator* op = javascript()->CallRuntime(
+      is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
+                                 : Runtime::kStoreLookupSlot_Sloppy);
+  Node* node = NewNode(op, name_node, value);
+  return node;
 }
 
 
@@ -3703,19 +3693,6 @@
 }
 
 
-Node* AstGraphBuilder::BuildLoadFeedbackVector() {
-  if (!feedback_vector_.is_set()) {
-    Node* closure = GetFunctionClosure();
-    Node* shared = BuildLoadImmutableObjectField(
-        closure, JSFunction::kSharedFunctionInfoOffset);
-    Node* vector = BuildLoadImmutableObjectField(
-        shared, SharedFunctionInfo::kFeedbackVectorOffset);
-    feedback_vector_.set(vector);
-  }
-  return feedback_vector_.get();
-}
-
-
 Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
   if (Node* node = TryFastToBoolean(input)) return node;
   ToBooleanHints hints;
@@ -3758,7 +3735,7 @@
 
 
 Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
-  const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
   Node* call = NewNode(op, exception);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3770,8 +3747,7 @@
 Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
                                                 BailoutId bailout_id) {
   Node* variable_name = jsgraph()->Constant(variable->name());
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
+  const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
   Node* call = NewNode(op, variable_name);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3782,7 +3758,7 @@
 
 Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
+      javascript()->CallRuntime(Runtime::kThrowConstAssignError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3793,7 +3769,7 @@
 
 Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError, 0);
+      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3804,7 +3780,7 @@
 
 Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
   const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
   Node* call = NewNode(op);
   PrepareFrameState(call, bailout_id);
   Node* control = NewNode(common()->Throw(), call);
@@ -3814,6 +3790,11 @@
 
 
 Node* AstGraphBuilder::BuildReturn(Node* return_value) {
+  // Emit a tracing call if requested (FLAG_trace).
+  if (FLAG_trace) {
+    return_value =
+        NewNode(javascript()->CallRuntime(Runtime::kTraceExit), return_value);
+  }
   Node* control = NewNode(common()->Return(), return_value);
   UpdateControlDependencyToLeaveFunction(control);
   return control;
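
With tracing enabled, every return value is now threaded through Runtime::kTraceExit before the Return node, presumably mirroring the function-tracing hooks in the other compilers. For a function returning v, the emitted subgraph is roughly:

    // v' = %TraceExit(v); Return(v')
    Node* traced =
        NewNode(javascript()->CallRuntime(Runtime::kTraceExit), v);
    Node* control = NewNode(common()->Return(), traced);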
@@ -3821,7 +3802,7 @@
 
 
 Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
-  NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), exception_value);
+  NewNode(javascript()->CallRuntime(Runtime::kReThrow), exception_value);
   Node* control = NewNode(common()->Throw(), exception_value);
   UpdateControlDependencyToLeaveFunction(control);
   return control;
@@ -3838,37 +3819,37 @@
   }
   switch (op) {
     case Token::BIT_OR:
-      js_op = javascript()->BitwiseOr(language_mode(), hints);
+      js_op = javascript()->BitwiseOr(hints);
       break;
     case Token::BIT_AND:
-      js_op = javascript()->BitwiseAnd(language_mode(), hints);
+      js_op = javascript()->BitwiseAnd(hints);
       break;
     case Token::BIT_XOR:
-      js_op = javascript()->BitwiseXor(language_mode(), hints);
+      js_op = javascript()->BitwiseXor(hints);
       break;
     case Token::SHL:
-      js_op = javascript()->ShiftLeft(language_mode(), hints);
+      js_op = javascript()->ShiftLeft(hints);
       break;
     case Token::SAR:
-      js_op = javascript()->ShiftRight(language_mode(), hints);
+      js_op = javascript()->ShiftRight(hints);
       break;
     case Token::SHR:
-      js_op = javascript()->ShiftRightLogical(language_mode(), hints);
+      js_op = javascript()->ShiftRightLogical(hints);
       break;
     case Token::ADD:
-      js_op = javascript()->Add(language_mode(), hints);
+      js_op = javascript()->Add(hints);
       break;
     case Token::SUB:
-      js_op = javascript()->Subtract(language_mode(), hints);
+      js_op = javascript()->Subtract(hints);
       break;
     case Token::MUL:
-      js_op = javascript()->Multiply(language_mode(), hints);
+      js_op = javascript()->Multiply(hints);
       break;
     case Token::DIV:
-      js_op = javascript()->Divide(language_mode(), hints);
+      js_op = javascript()->Divide(hints);
       break;
     case Token::MOD:
-      js_op = javascript()->Modulus(language_mode(), hints);
+      js_op = javascript()->Modulus(hints);
       break;
     default:
       UNREACHABLE();
@@ -3916,17 +3897,21 @@
       fast_block.BreakUnless(check, BranchHint::kTrue);
     }
 
-    // Fast case, because variable is not shadowed. Perform global slot load.
-    Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
-    states.AddToNode(fast, bailout_id, combine);
-    environment()->Push(fast);
+    // Fast case, because variable is not shadowed.
+    if (Node* constant = TryLoadGlobalConstant(name)) {
+      environment()->Push(constant);
+    } else {
+      // Perform global slot load.
+      Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+      states.AddToNode(fast, bailout_id, combine);
+      environment()->Push(fast);
+    }
     slow_block.Break();
     environment()->Pop();
     fast_block.EndBlock();
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-    Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+    Node* slow = BuildDynamicLoad(name, typeof_mode);
     states.AddToNode(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
@@ -3969,8 +3954,7 @@
     fast_block.EndBlock();
 
     // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-    Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+    Node* slow = BuildDynamicLoad(name, typeof_mode);
     states.AddToNode(slow, bailout_id, combine);
     environment()->Push(slow);
     slow_block.EndBlock();
@@ -4047,8 +4031,10 @@
 
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node, 0)->opcode());
+    bool node_has_exception = NodeProperties::IsExceptionalCall(node);
     NodeProperties::ReplaceFrameStateInput(
-        node, 0, environment()->Checkpoint(ast_id, combine));
+        node, 0,
+        environment()->Checkpoint(ast_id, combine, node_has_exception));
   }
 }
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 3b6302d..6cff237 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -314,14 +314,13 @@
   Node* BuildGlobalStore(Handle<Name> name, Node* value,
                          const VectorSlotPair& feedback);
 
+  // Builders for dynamic variable loads and stores.
+  Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
+  Node* BuildDynamicStore(Handle<Name> name, Node* value);
+
   // Builders for accessing the function context.
   Node* BuildLoadGlobalObject();
   Node* BuildLoadNativeContextField(int index);
-  Node* BuildLoadFeedbackVector();
-
-  // Builder for accessing a (potentially immutable) object field.
-  Node* BuildLoadObjectField(Node* object, int offset);
-  Node* BuildLoadImmutableObjectField(Node* object, int offset);
 
   // Builders for automatic type conversion.
   Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
@@ -519,7 +518,8 @@
   // Preserve a checkpoint of the environment for the IR graph. Any
   // further mutation of the environment will not affect checkpoints.
   Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
-                                         OutputFrameStateCombine::Ignore());
+                                         OutputFrameStateCombine::Ignore(),
+                   bool node_has_exception = false);
 
   // Control dependency tracked by this environment.
   Node* GetControlDependency() { return control_dependency_; }
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 2074c94..ac96399 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -198,7 +198,7 @@
 }
 
 
-void ALAA::VisitSpread(Spread* e) { Visit(e->expression()); }
+void ALAA::VisitSpread(Spread* e) { UNREACHABLE(); }
 
 
 void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
@@ -266,7 +266,6 @@
   Visit(loop->assign_iterator());
   Enter(loop);
   Visit(loop->assign_each());
-  Visit(loop->each());
   Visit(loop->subject());
   Visit(loop->body());
   Exit(loop);
@@ -288,8 +287,7 @@
 }
 
 
-void ALAA::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void ALAA::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
diff --git a/src/compiler/bytecode-branch-analysis.cc b/src/compiler/bytecode-branch-analysis.cc
index 27699a1..4e96a53 100644
--- a/src/compiler/bytecode-branch-analysis.cc
+++ b/src/compiler/bytecode-branch-analysis.cc
@@ -11,115 +11,33 @@
 namespace internal {
 namespace compiler {
 
-// The class contains all of the sites that contain
-// branches to a particular target (bytecode offset).
-class BytecodeBranchInfo final : public ZoneObject {
- public:
-  explicit BytecodeBranchInfo(Zone* zone)
-      : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
-
-  void AddBranch(int source_offset, int target_offset);
-
-  // The offsets of bytecodes that refer to this bytecode as
-  // a back-edge predecessor.
-  const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
-
-  // The offsets of bytecodes that refer to this bytecode as
-  // a forwards-edge predecessor.
-  const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
-
- private:
-  ZoneVector<int> back_edge_offsets_;
-  ZoneVector<int> fore_edge_offsets_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
-};
-
-
-void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
-  if (source_offset < target_offset) {
-    fore_edge_offsets_.push_back(source_offset);
-  } else {
-    back_edge_offsets_.push_back(source_offset);
-  }
-}
-
-
 BytecodeBranchAnalysis::BytecodeBranchAnalysis(
     Handle<BytecodeArray> bytecode_array, Zone* zone)
-    : branch_infos_(zone),
-      bytecode_array_(bytecode_array),
-      reachable_(bytecode_array->length(), zone),
+    : bytecode_array_(bytecode_array),
+      is_backward_target_(bytecode_array->length(), zone),
+      is_forward_target_(bytecode_array->length(), zone),
       zone_(zone) {}
 
-
 void BytecodeBranchAnalysis::Analyze() {
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  bool reachable = true;
   while (!iterator.done()) {
     interpreter::Bytecode bytecode = iterator.current_bytecode();
     int current_offset = iterator.current_offset();
-    // All bytecode basic blocks are generated to be forward reachable
-    // and may also be backward reachable. Hence if there's a forward
-    // branch targetting here the code becomes reachable.
-    reachable = reachable || forward_branches_target(current_offset);
-    if (reachable) {
-      reachable_.Add(current_offset);
-      if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
-        // Only the branch is recorded, the forward path falls through
-        // and is handled as normal bytecode data flow.
-        AddBranch(current_offset, iterator.GetJumpTargetOffset());
-      } else if (interpreter::Bytecodes::IsJump(bytecode)) {
-        // Unless the branch targets the next bytecode it's not
-        // reachable. If it targets the next bytecode the check at the
-        // start of the loop will set the reachable flag.
-        AddBranch(current_offset, iterator.GetJumpTargetOffset());
-        reachable = false;
-      } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
-        DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
-        reachable = false;
-      }
+    if (interpreter::Bytecodes::IsJump(bytecode)) {
+      AddBranch(current_offset, iterator.GetJumpTargetOffset());
     }
     iterator.Advance();
   }
 }
 
-
-const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
-    int offset) const {
-  auto iterator = branch_infos_.find(offset);
-  if (branch_infos_.end() != iterator) {
-    return iterator->second->back_edge_offsets();
-  } else {
-    return nullptr;
-  }
-}
-
-
-const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
-    int offset) const {
-  auto iterator = branch_infos_.find(offset);
-  if (branch_infos_.end() != iterator) {
-    return iterator->second->fore_edge_offsets();
-  } else {
-    return nullptr;
-  }
-}
-
-
 void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
-  BytecodeBranchInfo* branch_info = nullptr;
-  auto iterator = branch_infos_.find(target_offset);
-  if (branch_infos_.end() == iterator) {
-    branch_info = new (zone()) BytecodeBranchInfo(zone());
-    branch_infos_.insert(std::make_pair(target_offset, branch_info));
+  if (source_offset < target_offset) {
+    is_forward_target_.Add(target_offset);
   } else {
-    branch_info = iterator->second;
+    is_backward_target_.Add(target_offset);
   }
-  branch_info->AddBranch(source_offset, target_offset);
 }
 
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
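
The per-target ZoneVector bookkeeping collapses into two bit vectors: each bytecode offset is now merely classified as a forward and/or backward branch target. A sketch of how a consumer would use the surviving interface, relying on the back-edge invariant documented in the header:

    BytecodeBranchAnalysis analysis(bytecode_array, zone);
    analysis.Analyze();
    // Backward branches only target loop headers, so:
    bool is_loop_header = analysis.backward_branches_target(offset);
    // Forward targets are merge points that need environment merging.
    bool is_merge_point = analysis.forward_branches_target(offset);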
diff --git a/src/compiler/bytecode-branch-analysis.h b/src/compiler/bytecode-branch-analysis.h
index 0ef33b6..7d32da8 100644
--- a/src/compiler/bytecode-branch-analysis.h
+++ b/src/compiler/bytecode-branch-analysis.h
@@ -7,7 +7,6 @@
 
 #include "src/bit-vector.h"
 #include "src/handles.h"
-#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -16,15 +15,13 @@
 
 namespace compiler {
 
-class BytecodeBranchInfo;
-
-// A class for identifying the branch targets and their branch sites
-// within a bytecode array and also identifying which bytecodes are
-// reachable. This information can be used to construct the local
-// control flow logic for high-level IR graphs built from bytecode.
+// A class for identifying branch targets within a bytecode array.
+// This information can be used to construct the local control flow
+// logic for high-level IR graphs built from bytecode.
 //
-// NB This class relies on the only backwards branches in bytecode
-// being jumps back to loop headers.
+// N.B. If this class is used to determine loop headers, that usage relies
+// on backward branches in bytecode only ever being jumps back to loop
+// headers.
 class BytecodeBranchAnalysis BASE_EMBEDDED {
  public:
   BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
@@ -34,27 +31,16 @@
   // until this has been called.
   void Analyze();
 
-  // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
-  const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
-
-  // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
-  const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
-
-  // Returns true if the bytecode at |offset| is reachable.
-  bool is_reachable(int offset) const { return reachable_.Contains(offset); }
-
   // Returns true if there are any forward branches to the bytecode at
   // |offset|.
   bool forward_branches_target(int offset) const {
-    const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
-    return sites != nullptr && sites->size() > 0;
+    return is_forward_target_.Contains(offset);
   }
 
   // Returns true if there are any backward branches to the bytecode
   // at |offset|.
   bool backward_branches_target(int offset) const {
-    const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
-    return sites != nullptr && sites->size() > 0;
+    return is_backward_target_.Contains(offset);
   }
 
  private:
@@ -63,9 +49,9 @@
   Zone* zone() const { return zone_; }
   Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
 
-  ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
   Handle<BytecodeArray> bytecode_array_;
-  BitVector reachable_;
+  BitVector is_backward_target_;
+  BitVector is_forward_target_;
   Zone* zone_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index cf0b6ab..e28c19d 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -13,25 +13,108 @@
 namespace internal {
 namespace compiler {
 
+// The abstract execution environment simulates the content of the interpreter
+// register file. The environment performs SSA-renaming of all tracked nodes at
+// split and merge points in the control flow.
+class BytecodeGraphBuilder::Environment : public ZoneObject {
+ public:
+  Environment(BytecodeGraphBuilder* builder, int register_count,
+              int parameter_count, Node* control_dependency, Node* context);
+
+  int parameter_count() const { return parameter_count_; }
+  int register_count() const { return register_count_; }
+
+  Node* LookupAccumulator() const;
+  Node* LookupRegister(interpreter::Register the_register) const;
+
+  void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+  void BindRegister(interpreter::Register the_register, Node* node,
+                    FrameStateBeforeAndAfter* states = nullptr);
+  void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+                                  FrameStateBeforeAndAfter* states = nullptr);
+  void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+
+  // Effect dependency tracked by this environment.
+  Node* GetEffectDependency() { return effect_dependency_; }
+  void UpdateEffectDependency(Node* dependency) {
+    effect_dependency_ = dependency;
+  }
+
+  // Preserve a checkpoint of the environment for the IR graph. Any
+  // further mutation of the environment will not affect checkpoints.
+  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+  // Returns true if the state values are up to date with the current
+  // environment.
+  bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
+  // Control dependency tracked by this environment.
+  Node* GetControlDependency() const { return control_dependency_; }
+  void UpdateControlDependency(Node* dependency) {
+    control_dependency_ = dependency;
+  }
+
+  Node* Context() const { return context_; }
+  void SetContext(Node* new_context) { context_ = new_context; }
+
+  Environment* CopyForConditional() const;
+  Environment* CopyForLoop();
+  void Merge(Environment* other);
+
+ private:
+  explicit Environment(const Environment* copy);
+  void PrepareForLoop();
+  bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+                              int output_poke_start, int output_poke_end);
+  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+  void UpdateStateValues(Node** state_values, int offset, int count);
+
+  int RegisterToValuesIndex(interpreter::Register the_register) const;
+
+  Zone* zone() const { return builder_->local_zone(); }
+  Graph* graph() const { return builder_->graph(); }
+  CommonOperatorBuilder* common() const { return builder_->common(); }
+  BytecodeGraphBuilder* builder() const { return builder_; }
+  const NodeVector* values() const { return &values_; }
+  NodeVector* values() { return &values_; }
+  int register_base() const { return register_base_; }
+  int accumulator_base() const { return accumulator_base_; }
+
+  BytecodeGraphBuilder* builder_;
+  int register_count_;
+  int parameter_count_;
+  Node* context_;
+  Node* control_dependency_;
+  Node* effect_dependency_;
+  NodeVector values_;
+  Node* parameters_state_values_;
+  Node* registers_state_values_;
+  Node* accumulator_state_values_;
+  int register_base_;
+  int accumulator_base_;
+};
+
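The CopyForConditional/CopyForLoop/Merge trio is what implements the SSA renaming mentioned above: the environment is copied at a control-flow split and merged back afterwards, with differing register values turning into Phi nodes. A hypothetical shape for a conditional, based only on the interface above:

    // At a branch: keep a copy for the other arm, then merge the two.
    Environment* else_env = environment()->CopyForConditional();
    // ... build the then-arm in environment() ...
    // ... build the else-arm in else_env ...
    environment()->Merge(else_env);  // introduces Phis where values differ
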
 // Helper for generating frame states before and after a bytecode.
 class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
  public:
-  FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
-                           const interpreter::BytecodeArrayIterator& iterator)
+  explicit FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder)
       : builder_(builder),
         id_after_(BailoutId::None()),
         added_to_node_(false),
+        frame_states_unused_(false),
         output_poke_offset_(0),
         output_poke_count_(0) {
-    BailoutId id_before(iterator.current_offset());
+    BailoutId id_before(builder->bytecode_iterator().current_offset());
     frame_state_before_ = builder_->environment()->Checkpoint(
         id_before, OutputFrameStateCombine::Ignore());
-    id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+    id_after_ = BailoutId(id_before.ToInt() +
+                          builder->bytecode_iterator().current_bytecode_size());
   }
 
   ~FrameStateBeforeAndAfter() {
     DCHECK(added_to_node_);
-    DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+    DCHECK(frame_states_unused_ ||
+           builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
                                                            output_poke_count_));
   }
 
@@ -62,6 +145,7 @@
       output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
       output_poke_count_ = node->op()->ValueOutputCount();
     }
+    frame_states_unused_ = count == 0;
     added_to_node_ = true;
   }
 
@@ -70,6 +154,7 @@
   BailoutId id_after_;
 
   bool added_to_node_;
+  bool frame_states_unused_;
   int output_poke_offset_;
   int output_poke_count_;
 };
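
The helper is used RAII-style around any node that can deoptimize: construct it before building the node, then pass it when binding the result, which calls AddToNode() and satisfies the destructor's DCHECKs. The typical shape, as seen in the visitors below:

    FrameStateBeforeAndAfter states(this);
    Node* node = NewNode(op, input);
    environment()->BindAccumulator(node, &states);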
@@ -155,8 +240,8 @@
 
 Node* BytecodeGraphBuilder::Environment::LookupRegister(
     interpreter::Register the_register) const {
-  if (the_register.is_function_context()) {
-    return builder()->GetFunctionContext();
+  if (the_register.is_current_context()) {
+    return Context();
   } else if (the_register.is_function_closure()) {
     return builder()->GetFunctionClosure();
   } else if (the_register.is_new_target()) {
@@ -168,16 +253,6 @@
 }
 
 
-void BytecodeGraphBuilder::Environment::ExchangeRegisters(
-    interpreter::Register reg0, interpreter::Register reg1) {
-  int reg0_index = RegisterToValuesIndex(reg0);
-  int reg1_index = RegisterToValuesIndex(reg1);
-  Node* saved_reg0_value = values()->at(reg0_index);
-  values()->at(reg0_index) = values()->at(reg1_index);
-  values()->at(reg1_index) = saved_reg0_value;
-}
-
-
 void BytecodeGraphBuilder::Environment::BindAccumulator(
     Node* node, FrameStateBeforeAndAfter* states) {
   if (states) {
@@ -220,16 +295,6 @@
 }
 
 
-bool BytecodeGraphBuilder::Environment::IsMarkedAsUnreachable() const {
-  return GetControlDependency()->opcode() == IrOpcode::kDead;
-}
-
-
-void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
-  UpdateControlDependency(builder()->jsgraph()->Dead());
-}
-
-
 BytecodeGraphBuilder::Environment*
 BytecodeGraphBuilder::Environment::CopyForLoop() {
   PrepareForLoop();
@@ -245,11 +310,6 @@
 
 void BytecodeGraphBuilder::Environment::Merge(
     BytecodeGraphBuilder::Environment* other) {
-  // Nothing to do if the other environment is dead.
-  if (other->IsMarkedAsUnreachable()) {
-    return;
-  }
-
   // Create a merge of the control dependencies of both environments and update
   // the current environment's control dependency accordingly.
   Node* control = builder()->MergeControl(GetControlDependency(),
@@ -295,7 +355,7 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
     Node** state_values, int offset, int count) {
-  if (!builder()->info()->is_deoptimization_enabled()) {
+  if (!builder()->deoptimization_enabled_) {
     return false;
   }
   if (*state_values == nullptr) {
@@ -325,7 +385,7 @@
 
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine) {
-  if (!builder()->info()->is_deoptimization_enabled()) {
+  if (!builder()->deoptimization_enabled_) {
     return builder()->jsgraph()->EmptyFrameState();
   }
 
@@ -363,6 +423,7 @@
 
 bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
     int output_poke_offset, int output_poke_count) {
+  if (!builder()->deoptimization_enabled_) return true;
   // Poke offset is relative to the top of the stack (i.e., the accumulator).
   int output_poke_start = accumulator_base() - output_poke_offset;
   int output_poke_end = output_poke_start + output_poke_count;
@@ -375,26 +436,27 @@
                                 1, output_poke_start, output_poke_end);
 }
 
-
 BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
-                                           CompilationInfo* compilation_info,
+                                           CompilationInfo* info,
                                            JSGraph* jsgraph)
     : local_zone_(local_zone),
-      info_(compilation_info),
       jsgraph_(jsgraph),
-      bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+      bytecode_array_(handle(info->shared_info()->bytecode_array())),
+      exception_handler_table_(
+          handle(HandlerTable::cast(bytecode_array()->handler_table()))),
+      feedback_vector_(info->feedback_vector()),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
-          bytecode_array()->register_count(), info()->shared_info(),
-          CALL_MAINTAINS_NATIVE_CONTEXT)),
+          bytecode_array()->register_count(), info->shared_info())),
+      deoptimization_enabled_(info->is_deoptimization_enabled()),
       merge_environments_(local_zone),
-      loop_header_environments_(local_zone),
+      exception_handlers_(local_zone),
+      current_exception_handler_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
       exit_controls_(local_zone) {}
 
-
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
     int params = bytecode_array()->parameter_count();
@@ -430,21 +492,6 @@
 }
 
 
-Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
-  return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
-                 jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
-                                                          int offset) {
-  return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
-                          object,
-                          jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
-                          graph()->start(), graph()->start());
-}
-
-
 Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
@@ -453,30 +500,15 @@
 }
 
 
-Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
-  if (!feedback_vector_.is_set()) {
-    Node* closure = GetFunctionClosure();
-    Node* shared = BuildLoadImmutableObjectField(
-        closure, JSFunction::kSharedFunctionInfoOffset);
-    Node* vector = BuildLoadImmutableObjectField(
-        shared, SharedFunctionInfo::kFeedbackVectorOffset);
-    feedback_vector_.set(vector);
-  }
-  return feedback_vector_.get();
-}
-
-
 VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
-  Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
   FeedbackVectorSlot slot;
   if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
-    slot = feedback_vector->ToSlot(slot_id);
+    slot = feedback_vector()->ToSlot(slot_id);
   }
-  return VectorSlotPair(feedback_vector, slot);
+  return VectorSlotPair(feedback_vector(), slot);
 }
 
-
-bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+bool BytecodeGraphBuilder::CreateGraph() {
   // Set up the basic structure of the graph. Outputs for {Start} are
   // the formal parameters (including the receiver) plus context and
   // closure.
@@ -492,7 +524,7 @@
                   GetFunctionContext());
   set_environment(&env);
 
-  CreateGraphBody(stack_check);
+  VisitBytecodes();
 
   // Finish the basic structure of the graph.
   DCHECK_NE(0u, exit_controls_.size());
@@ -504,20 +536,6 @@
   return true;
 }
 
-
-void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
-  // TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
-  // object setup, this function variable if used, tracing hooks.
-
-  if (stack_check) {
-    Node* node = NewNode(javascript()->StackCheck());
-    PrepareEntryFrameState(node);
-  }
-
-  VisitBytecodes();
-}
-
-
 void BytecodeGraphBuilder::VisitBytecodes() {
   BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
   analysis.Analyze();
@@ -526,14 +544,15 @@
   set_bytecode_iterator(&iterator);
   while (!iterator.done()) {
     int current_offset = iterator.current_offset();
-    if (analysis.is_reachable(current_offset)) {
-      MergeEnvironmentsOfForwardBranches(current_offset);
-      BuildLoopHeaderForBackwardBranches(current_offset);
+    EnterAndExitExceptionHandlers(current_offset);
+    SwitchToMergeEnvironment(current_offset);
+    if (environment() != nullptr) {
+      BuildLoopHeaderEnvironment(current_offset);
 
       switch (iterator.current_bytecode()) {
 #define BYTECODE_CASE(name, ...)       \
   case interpreter::Bytecode::k##name: \
-    Visit##name(iterator);             \
+    Visit##name();                     \
     break;
         BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
@@ -543,635 +562,417 @@
   }
   set_branch_analysis(nullptr);
   set_bytecode_iterator(nullptr);
+  DCHECK(exception_handlers_.empty());
 }
 
-
-void BytecodeGraphBuilder::VisitLdaZero(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaZero() {
   Node* node = jsgraph()->ZeroConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaSmi8(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+void BytecodeGraphBuilder::VisitLdaSmi8() {
+  Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstantWide() {
+  Node* node =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstant() {
+  Node* node =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaUndefined(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaUndefined() {
   Node* node = jsgraph()->UndefinedConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaNull(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaNull() {
   Node* node = jsgraph()->NullConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaTheHole(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTheHole() {
   Node* node = jsgraph()->TheHoleConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTrue() {
   Node* node = jsgraph()->TrueConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaFalse() {
   Node* node = jsgraph()->FalseConstant();
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitLdar(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitLdar() {
+  Node* value =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   environment()->BindAccumulator(value);
 }
 
-
-void BytecodeGraphBuilder::VisitStar(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStar() {
   Node* value = environment()->LookupAccumulator();
-  environment()->BindRegister(iterator.GetRegisterOperand(0), value);
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value);
 }
 
-
-void BytecodeGraphBuilder::VisitMov(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  environment()->BindRegister(iterator.GetRegisterOperand(1), value);
+void BytecodeGraphBuilder::VisitMov() {
+  Node* value =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-
-void BytecodeGraphBuilder::VisitExchange(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
-                                   iterator.GetRegisterOperand(1));
-}
-
-
-void BytecodeGraphBuilder::VisitExchangeWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
-                                   iterator.GetRegisterOperand(1));
-}
-
+void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
 
 void BytecodeGraphBuilder::BuildLoadGlobal(
-    const interpreter::BytecodeArrayIterator& iterator,
     TypeofMode typeof_mode) {
-  FrameStateBeforeAndAfter states(this, iterator);
+  FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, BuildLoadFeedbackVector());
+  Node* node = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobal() {
+  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
+  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalWide() {
+  BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
+  BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::BuildStoreGlobal(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
   Node* value = environment()->LookupAccumulator();
 
-  const Operator* op =
-      javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
+  Node* node = NewNode(op, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
+  BuildStoreGlobal(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrict() {
+  BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
+  BuildStoreGlobal(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
+  BuildStoreGlobal(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaContextSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode, so the depth here is always 0. Update this code
   // when the implementation changes.
   // TODO(mythria): The immutable flag is also set to false, since this
   // information is not available in the bytecode array. Update this code
   // when the implementation changes.
-  const Operator* op =
-      javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  const Operator* op = javascript()->LoadContext(
+      0, bytecode_iterator().GetIndexOperand(1), false);
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* node = NewNode(op, context);
   environment()->BindAccumulator(node);
 }
 
+void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
 
-void BytecodeGraphBuilder::VisitLdaContextSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitStaContextSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStaContextSlot() {
   // TODO(mythria): LoadContextSlots are unrolled by the required depth when
   // generating bytecode. Hence the value of depth is always 0. Update this
   // code, when the implementation changes.
   const Operator* op =
-      javascript()->StoreContext(0, iterator.GetIndexOperand(1));
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+      javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(1));
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* value = environment()->LookupAccumulator();
   NewNode(op, context, value);
 }
 
+void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
 
-void BytecodeGraphBuilder::VisitStaContextSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildLdaLookupSlot(
-    TypeofMode typeof_mode,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Handle<String> name =
-      Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
-  const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
-  Node* value =
-      NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
+  FrameStateBeforeAndAfter states(this);
+  Node* name =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+  const Operator* op =
+      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+                                    ? Runtime::kLoadLookupSlot
+                                    : Runtime::kLoadLookupSlotInsideTypeof);
+  Node* value = NewNode(op, name);
   environment()->BindAccumulator(value, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlot() {
+  BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
+  BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
 }
 
-
-void BytecodeGraphBuilder::BuildStaLookupSlot(
-    LanguageMode language_mode,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
-  Node* language = jsgraph()->Constant(language_mode);
-  const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
-  Node* store = NewNode(op, value, environment()->Context(), name, language);
+  Node* name =
+      jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
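+  // The language mode picks the runtime entry: in sloppy mode a store to an
+  // unresolvable name creates a global property, in strict mode it throws.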
+  const Operator* op = javascript()->CallRuntime(
+      is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
+                               : Runtime::kStoreLookupSlot_Sloppy);
+  Node* store = NewNode(op, name, value);
   environment()->BindAccumulator(store, &states);
 }
 
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
 
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaLookupSlot(iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
+  VisitLdaLookupSlotInsideTypeof();
 }
 
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitLdaLookupSlotInsideTypeof(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
+  BuildStaLookupSlot(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
+  BuildStaLookupSlot(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
+  VisitStaLookupSlotSloppy();
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaLookupSlotSloppy(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
+  VisitStaLookupSlotStrict();
 }
 
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitStaLookupSlotStrict(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildNamedLoad(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildNamedLoad() {
+  FrameStateBeforeAndAfter states(this);
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, BuildLoadFeedbackVector());
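+  // Named and keyed ICs no longer take an explicit feedback vector input;
+  // the closure is passed instead and the vector is loaded from it during
+  // lowering.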
+  const Operator* op = javascript()->LoadNamed(name, feedback);
+  Node* node = NewNode(op, object, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
+void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
 
-void BytecodeGraphBuilder::VisitLoadICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedLoad(iterator);
-}
+void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
 
-
-void BytecodeGraphBuilder::VisitLoadICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildKeyedLoad(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedLoad() {
+  FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
-  const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->LoadProperty(feedback);
+  Node* node = NewNode(op, object, key, GetFunctionClosure());
   environment()->BindAccumulator(node, &states);
 }
 
+void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
 
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedLoad(iterator);
-}
+void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
 
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildNamedStore(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Handle<Name> name =
-      Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op =
-      javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
+  Node* node = NewNode(op, object, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppy() {
+  BuildNamedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrict() {
+  BuildNamedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
+  BuildNamedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrictWide() {
+  BuildNamedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::BuildKeyedStore(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* key =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+  const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
   environment()->RecordAfterState(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+  BuildKeyedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+  BuildKeyedStore(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
+  BuildKeyedStore(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
+  BuildKeyedStore(LanguageMode::STRICT);
 }
 
+void BytecodeGraphBuilder::VisitPushContext() {
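+  // Swap contexts: the operand register receives the current context, and
+  // the new context held in the accumulator becomes the current one.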
+  Node* new_context = environment()->LookupAccumulator();
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
+                              environment()->Context());
+  environment()->SetContext(new_context);
+}
 
-void BytecodeGraphBuilder::VisitPushContext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* context = environment()->LookupAccumulator();
-  environment()->BindRegister(iterator.GetRegisterOperand(0), context);
+void BytecodeGraphBuilder::VisitPopContext() {
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   environment()->SetContext(context);
 }
 
-
-void BytecodeGraphBuilder::VisitPopContext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  environment()->SetContext(context);
-}
-
-
-void BytecodeGraphBuilder::VisitCreateClosure(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<SharedFunctionInfo> shared_info =
-      Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitCreateClosure() {
+  Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
   PretenureFlag tenured =
-      iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+      bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
   const Operator* op = javascript()->CreateClosure(shared_info, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
 
+void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
 
-void BytecodeGraphBuilder::VisitCreateClosureWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  VisitCreateClosure(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildCreateArguments(
-    CreateArgumentsParameters::Type type,
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* op = javascript()->CreateArguments(type, 0);
+void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* op = javascript()->CreateArguments(type);
   Node* object = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(object, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateMappedArguments(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateMappedArguments() {
+  BuildCreateArguments(CreateArgumentsType::kMappedArguments);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments() {
+  BuildCreateArguments(CreateArgumentsType::kUnmappedArguments);
 }
 
+void BytecodeGraphBuilder::VisitCreateRestParameter() {
+  BuildCreateArguments(CreateArgumentsType::kRestParameter);
+}
 
-void BytecodeGraphBuilder::BuildCreateLiteral(
-    const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
+  FrameStateBeforeAndAfter states(this);
   Node* literal = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(literal, &states);
 }
 
-
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
   Handle<String> constant_pattern =
-      Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+      Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralRegExp(
       constant_pattern, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
+  BuildCreateRegExpLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
+  BuildCreateRegExpLiteral();
 }
 
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<FixedArray> constant_elements =
-      Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+  Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralArray(
       constant_elements, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
+  BuildCreateArrayLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
+  BuildCreateArrayLiteral();
 }
 
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Handle<FixedArray> constant_properties =
-      Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
-  int literal_index = iterator.GetIndexOperand(1);
-  int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
+  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
+      bytecode_iterator().GetConstantForIndexOperand(0));
+  int literal_index = bytecode_iterator().GetIndexOperand(1);
+  int literal_flags = bytecode_iterator().GetImmediateOperand(2);
   const Operator* op = javascript()->CreateLiteralObject(
       constant_properties, literal_flags, literal_index);
-  BuildCreateLiteral(op, iterator);
+  BuildCreateLiteral(op);
 }
 
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteral(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
+  BuildCreateObjectLiteral();
 }
 
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
+  BuildCreateObjectLiteral();
 }
 
 
@@ -1179,7 +980,7 @@
                                                  Node* callee,
                                                  interpreter::Register receiver,
                                                  size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+  Node** all = local_zone()->NewArray<Node*>(static_cast<int>(arity));
   all[0] = callee;
   all[1] = environment()->LookupRegister(receiver);
   int receiver_index = receiver.index();
@@ -1191,57 +992,58 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::BuildCall(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
+  FrameStateBeforeAndAfter states(this);
   // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
   // register has been loaded with null / undefined explicitly, or whether we
   // are sure it is not null / undefined.
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
-  Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  interpreter::Register receiver = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-  VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+  Node* callee =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
 
   const Operator* call = javascript()->CallFunction(
-      arg_count + 2, language_mode(), feedback, receiver_hint);
-  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+      arg_count + 1, feedback, receiver_hint, tail_call_mode);
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
 
-void BytecodeGraphBuilder::VisitCall(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCall(iterator);
+void BytecodeGraphBuilder::VisitCallWide() {
+  BuildCall(TailCallMode::kDisallow);
 }
 
+void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
 
-void BytecodeGraphBuilder::VisitCallWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCall(iterator);
+void BytecodeGraphBuilder::VisitTailCallWide() {
+  BuildCall(TailCallMode::kAllow);
 }
 
-
-void BytecodeGraphBuilder::VisitCallJSRuntime(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
-  interpreter::Register receiver = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
+void BytecodeGraphBuilder::BuildCallJSRuntime() {
+  FrameStateBeforeAndAfter states(this);
+  Node* callee =
+      BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the JS runtime call.
-  const Operator* call =
-      javascript()->CallFunction(arg_count + 2, language_mode());
-  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+  const Operator* call = javascript()->CallFunction(arg_count + 1);
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
+
+void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
 
 Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
     const Operator* call_runtime_op, interpreter::Register first_arg,
     size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(arity);
+  Node** all = local_zone()->NewArray<Node*>(arity);
   int first_arg_index = first_arg.index();
   for (int i = 0; i < static_cast<int>(arity); ++i) {
     all[i] = environment()->LookupRegister(
@@ -1251,14 +1053,12 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::VisitCallRuntime(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCallRuntime() {
+  FrameStateBeforeAndAfter states(this);
   Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
+      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1266,15 +1066,18 @@
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
 
-void BytecodeGraphBuilder::VisitCallRuntimeForPair(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
+
+void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+  FrameStateBeforeAndAfter states(this);
   Runtime::FunctionId functionId =
-      static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-  interpreter::Register first_return = iterator.GetRegisterOperand(3);
+      static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  interpreter::Register first_return =
+      bytecode_iterator().GetRegisterOperand(3);
 
   // Create node to perform the runtime call.
   const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1282,164 +1085,151 @@
   environment()->BindRegistersToProjections(first_return, return_pair, &states);
 }
 
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
+  BuildCallRuntimeForPair();
+}
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
+  BuildCallRuntimeForPair();
+}
 
 Node* BytecodeGraphBuilder::ProcessCallNewArguments(
-    const Operator* call_new_op, interpreter::Register callee,
+    const Operator* call_new_op, Node* callee, Node* new_target,
     interpreter::Register first_arg, size_t arity) {
-  Node** all = info()->zone()->NewArray<Node*>(arity);
-  all[0] = environment()->LookupRegister(callee);
+  Node** all = local_zone()->NewArray<Node*>(arity);
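+  // Input layout for the construct call: the new target in slot 0, the
+  // argument registers in between, and the original callee in the last slot.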
+  all[0] = new_target;
   int first_arg_index = first_arg.index();
   for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
     all[i] = environment()->LookupRegister(
         interpreter::Register(first_arg_index + i - 1));
   }
-  // Original constructor is the same as the callee.
-  all[arity - 1] = environment()->LookupRegister(callee);
+  all[arity - 1] = callee;
   Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
   return value;
 }
 
+void BytecodeGraphBuilder::BuildCallConstruct() {
+  FrameStateBeforeAndAfter states(this);
+  interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
-void BytecodeGraphBuilder::VisitNew(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  interpreter::Register callee = iterator.GetRegisterOperand(0);
-  interpreter::Register first_arg = iterator.GetRegisterOperand(1);
-  size_t arg_count = iterator.GetCountOperand(2);
-
+  Node* new_target = environment()->LookupAccumulator();
+  Node* callee = environment()->LookupRegister(callee_reg);
   // TODO(turbofan): Pass the feedback here.
   const Operator* call = javascript()->CallConstruct(
       static_cast<int>(arg_count) + 2, VectorSlotPair());
-  Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+  Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
+                                        arg_count + 2);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
 
-void BytecodeGraphBuilder::VisitThrow(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
+
+void BytecodeGraphBuilder::BuildThrow() {
+  FrameStateBeforeAndAfter states(this);
   Node* value = environment()->LookupAccumulator();
-  // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
-  // information support in the interpreter.
-  NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
-  Node* control = NewNode(common()->Throw(), value);
-  environment()->RecordAfterState(control, &states);
-  UpdateControlDependencyToLeaveFunction(control);
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
+  environment()->BindAccumulator(call, &states);
 }
 
+void BytecodeGraphBuilder::VisitThrow() {
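+  // BuildThrow emits the runtime call and binds its value; the Throw control
+  // node below then terminates this code path.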
+  BuildThrow();
+  Node* call = environment()->LookupAccumulator();
+  Node* control = NewNode(common()->Throw(), call);
+  MergeControlToLeaveFunction(control);
+}
 
-void BytecodeGraphBuilder::BuildBinaryOp(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitReThrow() {
+  Node* value = environment()->LookupAccumulator();
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
+  Node* control = NewNode(common()->Throw(), call);
+  MergeControlToLeaveFunction(control);
+}
+
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
+  Node* left =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitAdd(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitAdd() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Add(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitSub(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitSub() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Subtract(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitMul(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMul() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Multiply(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitDiv(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitDiv() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Divide(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitMod(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMod() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->Modulus(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseOr(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseOr() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseOr(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseXor(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseXor() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseXor(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitBitwiseAnd(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseAnd() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->BitwiseAnd(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftLeft(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftLeft() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->ShiftLeft(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftRight(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRight() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
+  BuildBinaryOp(javascript()->ShiftRight(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitShiftRightLogical(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRightLogical() {
   BinaryOperationHints hints = BinaryOperationHints::Any();
-  BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
-                iterator);
+  BuildBinaryOp(javascript()->ShiftRightLogical(hints));
 }
 
-
-void BytecodeGraphBuilder::VisitInc(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* js_op =
-      javascript()->Add(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitInc() {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitDec(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  const Operator* js_op =
-      javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitDec() {
+  FrameStateBeforeAndAfter states(this);
+  const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
   Node* node = NewNode(js_op, environment()->LookupAccumulator(),
                        jsgraph()->OneConstant());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitLogicalNot(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLogicalNot() {
   Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
                         environment()->LookupAccumulator());
   Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1447,408 +1237,307 @@
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::VisitTypeOf(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitTypeOf() {
   Node* node =
       NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
   environment()->BindAccumulator(node);
 }
 
-
-void BytecodeGraphBuilder::BuildDelete(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
+  FrameStateBeforeAndAfter states(this);
   Node* key = environment()->LookupAccumulator();
-  Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* node =
-      NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+      NewNode(javascript()->DeleteProperty(language_mode), object, key);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitDeletePropertyStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_strict(language_mode()));
-  BuildDelete(iterator);
+void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
+  BuildDelete(LanguageMode::STRICT);
 }
 
-
-void BytecodeGraphBuilder::VisitDeletePropertySloppy(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  DCHECK(is_sloppy(language_mode()));
-  BuildDelete(iterator);
+void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
+  BuildDelete(LanguageMode::SLOPPY);
 }
 
-
-void BytecodeGraphBuilder::VisitDeleteLookupSlot(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* name = environment()->LookupAccumulator();
-  const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
-  Node* result = NewNode(op, environment()->Context(), name);
-  environment()->BindAccumulator(result, &states);
-}
-
-
-void BytecodeGraphBuilder::BuildCompareOp(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
+  Node* left =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
   Node* node = NewNode(js_op, left, right);
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitTestEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->Equal(), iterator);
+void BytecodeGraphBuilder::VisitTestEqual() {
+  BuildCompareOp(javascript()->Equal());
 }
 
-
-void BytecodeGraphBuilder::VisitTestNotEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->NotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqual() {
+  BuildCompareOp(javascript()->NotEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestEqualStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->StrictEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestEqualStrict() {
+  BuildCompareOp(javascript()->StrictEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestNotEqualStrict(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->StrictNotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
+  BuildCompareOp(javascript()->StrictNotEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestLessThan(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThan() {
+  BuildCompareOp(javascript()->LessThan());
 }
 
-
-void BytecodeGraphBuilder::VisitTestGreaterThan(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThan() {
+  BuildCompareOp(javascript()->GreaterThan());
 }
 
-
-void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
+  BuildCompareOp(javascript()->LessThanOrEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
+  BuildCompareOp(javascript()->GreaterThanOrEqual());
 }
 
-
-void BytecodeGraphBuilder::VisitTestIn(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->HasProperty(), iterator);
+void BytecodeGraphBuilder::VisitTestIn() {
+  BuildCompareOp(javascript()->HasProperty());
 }
 
-
-void BytecodeGraphBuilder::VisitTestInstanceOf(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCompareOp(javascript()->InstanceOf(), iterator);
+void BytecodeGraphBuilder::VisitTestInstanceOf() {
+  BuildCompareOp(javascript()->InstanceOf());
 }
 
-
-void BytecodeGraphBuilder::BuildCastOperator(
-    const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
+  FrameStateBeforeAndAfter states(this);
   Node* node = NewNode(js_op, environment()->LookupAccumulator());
   environment()->BindAccumulator(node, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitToName(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToName(), iterator);
+void BytecodeGraphBuilder::VisitToName() {
+  BuildCastOperator(javascript()->ToName());
 }
 
-
-void BytecodeGraphBuilder::VisitToObject(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToObject(), iterator);
+void BytecodeGraphBuilder::VisitToObject() {
+  BuildCastOperator(javascript()->ToObject());
 }
 
-
-void BytecodeGraphBuilder::VisitToNumber(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildCastOperator(javascript()->ToNumber(), iterator);
+void BytecodeGraphBuilder::VisitToNumber() {
+  BuildCastOperator(javascript()->ToNumber());
 }
 
+void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJump(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
+void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
 
+void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
 
-void BytecodeGraphBuilder::VisitJumpConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
-
-
-void BytecodeGraphBuilder::VisitJumpConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  BuildJump();
-}
-
-
-void BytecodeGraphBuilder::VisitJumpIfTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrue() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
   BuildJumpIfEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalse() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
   BuildJumpIfEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
   BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
   BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
 }
 
+void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
 
-void BytecodeGraphBuilder::VisitJumpIfNull(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
+  BuildJumpIfNotHole();
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
+  BuildJumpIfNotHole();
+}
+
+void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfNullConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNullConstant() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefined(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefined() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
   BuildJumpIfEqual(jsgraph()->UndefinedConstant());
 }
 
+void BytecodeGraphBuilder::VisitStackCheck() {
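+  // A stack check can trigger an interrupt (and thus deoptimize or throw),
+  // so it needs frame states before and after.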
+  FrameStateBeforeAndAfter states(this);
+  Node* node = NewNode(javascript()->StackCheck());
+  environment()->RecordAfterState(node, &states);
+}
 
-void BytecodeGraphBuilder::VisitReturn(
-    const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitReturn() {
   Node* control =
       NewNode(common()->Return(), environment()->LookupAccumulator());
-  UpdateControlDependencyToLeaveFunction(control);
-  set_environment(nullptr);
+  MergeControlToLeaveFunction(control);
 }
 
-
-void BytecodeGraphBuilder::VisitForInPrepare(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  Node* prepare = nullptr;
-  {
-    FrameStateBeforeAndAfter states(this, iterator);
-    Node* receiver = environment()->LookupAccumulator();
-    prepare = NewNode(javascript()->ForInPrepare(), receiver);
-    environment()->RecordAfterState(prepare, &states);
-  }
-  // Project cache_type, cache_array, cache_length into register
-  // operands 1, 2, 3.
-  for (int i = 0; i < 3; i++) {
-    environment()->BindRegister(iterator.GetRegisterOperand(i),
-                                NewNode(common()->Projection(i), prepare));
-  }
+void BytecodeGraphBuilder::VisitDebugger() {
+  FrameStateBeforeAndAfter states(this);
+  Node* call =
+      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
+  environment()->BindAccumulator(call, &states);
 }
 
+// We cannot create a graph from the debugger copy of the bytecode array.
+#define DEBUG_BREAK(Name, ...) \
+  void BytecodeGraphBuilder::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
 
-void BytecodeGraphBuilder::VisitForInDone(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildForInPrepare() {
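+  // ForInPrepare produces the <cache_type, cache_array, cache_length>
+  // triple; its projections are bound to a consecutive register range.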
+  FrameStateBeforeAndAfter states(this);
+  Node* receiver = environment()->LookupAccumulator();
+  Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
+  environment()->BindRegistersToProjections(
+      bytecode_iterator().GetRegisterOperand(0), prepare, &states);
+}
+
+void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
+
+void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
+
+void BytecodeGraphBuilder::VisitForInDone() {
+  FrameStateBeforeAndAfter states(this);
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* cache_length =
-      environment()->LookupRegister(iterator.GetRegisterOperand(1));
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
   environment()->BindAccumulator(exit_cond, &states);
 }
 
-
-void BytecodeGraphBuilder::VisitForInNext(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildForInNext() {
+  FrameStateBeforeAndAfter states(this);
   Node* receiver =
-      environment()->LookupRegister(iterator.GetRegisterOperand(0));
-  Node* cache_type =
-      environment()->LookupRegister(iterator.GetRegisterOperand(1));
-  Node* cache_array =
-      environment()->LookupRegister(iterator.GetRegisterOperand(2));
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
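+  // Operand 2 names the first register of the consecutive
+  // <cache_type, cache_array> pair produced by ForInPrepare.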
+  int catch_reg_pair_index = bytecode_iterator().GetRegisterOperand(2).index();
+  Node* cache_type = environment()->LookupRegister(
+      interpreter::Register(catch_reg_pair_index));
+  Node* cache_array = environment()->LookupRegister(
+      interpreter::Register(catch_reg_pair_index + 1));
+
   Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
                         cache_type, index);
   environment()->BindAccumulator(value, &states);
 }
 
+void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
 
-void BytecodeGraphBuilder::VisitForInStep(
-    const interpreter::BytecodeArrayIterator& iterator) {
-  FrameStateBeforeAndAfter states(this, iterator);
-  Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
+
+void BytecodeGraphBuilder::VisitForInStep() {
+  FrameStateBeforeAndAfter states(this);
+  Node* index =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   index = NewNode(javascript()->ForInStep(), index);
   environment()->BindAccumulator(index, &states);
 }
 
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
-    int source_offset, int target_offset) {
-  DCHECK_GE(source_offset, target_offset);
-  const ZoneVector<int>* branch_sites =
-      branch_analysis()->BackwardBranchesTargetting(target_offset);
-  if (branch_sites->back() == source_offset) {
-    // The set of back branches is complete, merge them.
-    DCHECK_GE(branch_sites->at(0), target_offset);
-    Environment* merged = merge_environments_[branch_sites->at(0)];
-    for (size_t i = 1; i < branch_sites->size(); i++) {
-      DCHECK_GE(branch_sites->at(i), target_offset);
-      merged->Merge(merge_environments_[branch_sites->at(i)]);
+void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
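+  // If a merge environment exists for this offset, fold the current control
+  // flow into it and continue building from the merged state.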
+  if (merge_environments_[current_offset] != nullptr) {
+    if (environment() != nullptr) {
+      merge_environments_[current_offset]->Merge(environment());
     }
-    // And now merge with loop header environment created when loop
-    // header was visited.
-    loop_header_environments_[target_offset]->Merge(merged);
+    set_environment(merge_environments_[current_offset]);
   }
 }
 
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
-    int source_offset) {
-  if (branch_analysis()->forward_branches_target(source_offset)) {
-    // Merge environments of branches that reach this bytecode.
-    auto branch_sites =
-        branch_analysis()->ForwardBranchesTargetting(source_offset);
-    DCHECK_LT(branch_sites->at(0), source_offset);
-    Environment* merged = merge_environments_[branch_sites->at(0)];
-    for (size_t i = 1; i < branch_sites->size(); i++) {
-      DCHECK_LT(branch_sites->at(i), source_offset);
-      merged->Merge(merge_environments_[branch_sites->at(i)]);
-    }
-    if (environment()) {
-      merged->Merge(environment());
-    }
-    set_environment(merged);
-  }
-}
-
-
-void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
-    int source_offset) {
-  if (branch_analysis()->backward_branches_target(source_offset)) {
+void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
+  if (branch_analysis()->backward_branches_target(current_offset)) {
     // Add loop header and store a copy so we can connect merged back
     // edge inputs to the loop header.
-    loop_header_environments_[source_offset] = environment()->CopyForLoop();
+    merge_environments_[current_offset] = environment()->CopyForLoop();
   }
 }
 
-
-void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
-  DCHECK_NULL(merge_environments_[source_offset]);
-  merge_environments_[source_offset] = environment();
-  if (source_offset >= target_offset) {
-    MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
+  if (merge_environments_[target_offset] == nullptr) {
+    // Append merge nodes to the environment. We may merge here with another
+    // environment, so add a placeholder for merge nodes. Redundant merges may
+    // be added, but they will be eliminated in a later pass.
+    // TODO(mstarzinger): Be smarter about this!
+    NewMerge();
+    merge_environments_[target_offset] = environment();
+  } else {
+    merge_environments_[target_offset]->Merge(environment());
   }
   set_environment(nullptr);
 }
 
+void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
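+  // Collect the exit control; all exits are merged into the graph's end node
+  // once the whole bytecode array has been visited.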
+  exit_controls_.push_back(exit);
+  set_environment(nullptr);
+}
 
 void BytecodeGraphBuilder::BuildJump() {
-  int source_offset = bytecode_iterator()->current_offset();
-  int target_offset = bytecode_iterator()->GetJumpTargetOffset();
-  BuildJump(source_offset, target_offset);
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
 }
 
 
 void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
-  int source_offset = bytecode_iterator()->current_offset();
   NewBranch(condition);
   Environment* if_false_environment = environment()->CopyForConditional();
   NewIfTrue();
-  BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_false_environment);
   NewIfFalse();
 }
@@ -1870,6 +1559,15 @@
   BuildConditionalJump(condition);
 }
 
+void BytecodeGraphBuilder::BuildJumpIfNotHole() {
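+  // Compare against the hole and invert the result via a Select, so the jump
+  // is taken only when the accumulator does not hold the hole.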
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
+                            jsgraph()->TheHoleConstant());
+  Node* node =
+      NewNode(common()->Select(MachineRepresentation::kTagged), condition,
+              jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  BuildConditionalJump(node);
+}
 
 Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
   if (size > input_buffer_size_) {
@@ -1880,17 +1578,32 @@
   return input_buffer_;
 }
 
+void BytecodeGraphBuilder::EnterAndExitExceptionHandlers(int current_offset) {
+  Handle<HandlerTable> table = exception_handler_table();
+  int num_entries = table->NumberOfRangeEntries();
 
-void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
-  DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
-  DCHECK_EQ(IrOpcode::kDead,
-            NodeProperties::GetFrameStateInput(node, 0)->opcode());
-  NodeProperties::ReplaceFrameStateInput(
-      node, 0, environment()->Checkpoint(BailoutId(0),
-                                         OutputFrameStateCombine::Ignore()));
+  // Potentially exit exception handlers.
+  while (!exception_handlers_.empty()) {
+    int current_end = exception_handlers_.top().end_offset_;
+    if (current_offset < current_end) break;  // Still covered by range.
+    exception_handlers_.pop();
+  }
+
+  // Potentially enter exception handlers.
+  while (current_exception_handler_ < num_entries) {
+    int next_start = table->GetRangeStart(current_exception_handler_);
+    if (current_offset < next_start) break;  // Not yet covered by range.
+    int next_end = table->GetRangeEnd(current_exception_handler_);
+    int next_handler = table->GetRangeHandler(current_exception_handler_);
+    int context_register = table->GetRangeData(current_exception_handler_);
+    CatchPrediction pred =
+        table->GetRangePrediction(current_exception_handler_);
+    exception_handlers_.push(
+        {next_start, next_end, next_handler, context_register, pred});
+    current_exception_handler_++;
+  }
 }
 
-
 Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
                                      Node** value_inputs, bool incomplete) {
   DCHECK_EQ(op->ValueInputCount(), value_input_count);
@@ -1907,6 +1620,7 @@
   if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
+    bool inside_handler = !exception_handlers_.empty();
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
     input_count_with_deps += frame_state_count;
@@ -1931,21 +1645,40 @@
       *current_input++ = environment()->GetControlDependency();
     }
     result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
-    if (!environment()->IsMarkedAsUnreachable()) {
-      // Update the current control dependency for control-producing nodes.
-      if (NodeProperties::IsControl(result)) {
-        environment()->UpdateControlDependency(result);
-      }
-      // Update the current effect dependency for effect-producing nodes.
-      if (result->op()->EffectOutputCount() > 0) {
-        environment()->UpdateEffectDependency(result);
-      }
-      // Add implicit success continuation for throwing nodes.
-      if (!result->op()->HasProperty(Operator::kNoThrow)) {
-        const Operator* if_success = common()->IfSuccess();
-        Node* on_success = graph()->NewNode(if_success, result);
-        environment_->UpdateControlDependency(on_success);
-      }
+    // Update the current control dependency for control-producing nodes.
+    if (NodeProperties::IsControl(result)) {
+      environment()->UpdateControlDependency(result);
+    }
+    // Update the current effect dependency for effect-producing nodes.
+    if (result->op()->EffectOutputCount() > 0) {
+      environment()->UpdateEffectDependency(result);
+    }
+    // Add implicit exception continuation for throwing nodes.
+    if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
+      int handler_offset = exception_handlers_.top().handler_offset_;
+      int context_index = exception_handlers_.top().context_register_;
+      CatchPrediction prediction = exception_handlers_.top().pred_;
+      interpreter::Register context_register(context_index);
+      IfExceptionHint hint = prediction == CatchPrediction::CAUGHT
+                                 ? IfExceptionHint::kLocallyCaught
+                                 : IfExceptionHint::kLocallyUncaught;
+      Environment* success_env = environment()->CopyForConditional();
+      const Operator* op = common()->IfException(hint);
+      Node* effect = environment()->GetEffectDependency();
+      Node* on_exception = graph()->NewNode(op, effect, result);
+      Node* context = environment()->LookupRegister(context_register);
+      environment()->UpdateControlDependency(on_exception);
+      environment()->UpdateEffectDependency(on_exception);
+      environment()->BindAccumulator(on_exception);
+      environment()->SetContext(context);
+      MergeIntoSuccessorEnvironment(handler_offset);
+      set_environment(success_env);
+    }
+    // Add implicit success continuation for throwing nodes.
+    if (!result->op()->HasProperty(Operator::kNoThrow)) {
+      const Operator* if_success = common()->IfSuccess();
+      Node* on_success = graph()->NewNode(if_success, result);
+      environment()->UpdateControlDependency(on_success);
     }
   }
 
@@ -2028,13 +1761,6 @@
   return value;
 }
 
-
-void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
-  if (environment()->IsMarkedAsUnreachable()) return;
-  environment()->MarkAsUnreachable();
-  exit_controls_.push_back(exit);
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
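
For illustration, the handler enter/exit walk added to
BytecodeGraphBuilder::EnterAndExitExceptionHandlers above can be modeled by a
small standalone scan over sorted handler ranges. This is a toy sketch with
made-up types, not the V8 HandlerTable API:

    #include <stack>
    #include <vector>

    struct Handler { int start, end, target; };  // Covers offsets [start, end).

    // Scan bytecode offsets in order, keeping a stack of the handlers whose
    // range covers the current offset; `table` is sorted by start offset.
    void WalkHandlerRanges(const std::vector<Handler>& table, int bytecode_size) {
      std::stack<Handler> active;
      size_t next = 0;
      for (int offset = 0; offset < bytecode_size; ++offset) {
        // Exit handlers whose range has ended before this offset.
        while (!active.empty() && offset >= active.top().end) active.pop();
        // Enter handlers whose range starts at or before this offset.
        while (next < table.size() && offset >= table[next].start) {
          active.push(table[next++]);
        }
        // Because handler ranges are well scoped (they nest), active.top(),
        // if any, is the innermost handler covering `offset`.
      }
    }

The stack discipline is the same one the builder relies on for its
exception_handlers_ ZoneStack.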
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 94a278c..2fa5967 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -23,19 +23,14 @@
                        JSGraph* jsgraph);
 
   // Creates a graph by visiting bytecodes.
-  bool CreateGraph(bool stack_check = true);
-
-  Graph* graph() const { return jsgraph_->graph(); }
+  bool CreateGraph();
 
  private:
   class Environment;
   class FrameStateBeforeAndAfter;
 
-  void CreateGraphBody(bool stack_check);
   void VisitBytecodes();
 
-  Node* LoadAccumulator(Node* value);
-
   // Get or create the node that represents the outer function closure.
   Node* GetFunctionClosure();
 
@@ -45,13 +40,6 @@
   // Get or create the node that represents the incoming new target value.
   Node* GetNewTarget();
 
-  // Builder for accessing a (potentially immutable) object field.
-  Node* BuildLoadObjectField(Node* object, int offset);
-  Node* BuildLoadImmutableObjectField(Node* object, int offset);
-
-  // Builder for accessing type feedback vector.
-  Node* BuildLoadFeedbackVector();
-
   // Builder for loading a native context field.
   Node* BuildLoadNativeContextField(int index);
 
@@ -111,91 +99,102 @@
   Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
                  bool incomplete);
 
-  // Helper to indicate a node exits the function body.
-  void UpdateControlDependencyToLeaveFunction(Node* exit);
-
   Node** EnsureInputBufferSize(int size);
 
   Node* ProcessCallArguments(const Operator* call_op, Node* callee,
                              interpreter::Register receiver, size_t arity);
-  Node* ProcessCallNewArguments(const Operator* call_new_op,
-                                interpreter::Register callee,
+  Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
+                                Node* new_target,
                                 interpreter::Register first_arg, size_t arity);
   Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
                                     interpreter::Register first_arg,
                                     size_t arity);
 
-  void BuildCreateLiteral(const Operator* op,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateRegExpLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateArrayLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateObjectLiteral(
-      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCreateArguments(CreateArgumentsParameters::Type type,
-                            const interpreter::BytecodeArrayIterator& iterator);
-  void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
-                       TypeofMode typeof_mode);
-  void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildLdaLookupSlot(TypeofMode typeof_mode,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildStaLookupSlot(LanguageMode language_mode,
-                          const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildBinaryOp(const Operator* op,
-                     const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCompareOp(const Operator* op,
-                      const interpreter::BytecodeArrayIterator& iterator);
-  void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
-  void BuildCastOperator(const Operator* js_op,
-                         const interpreter::BytecodeArrayIterator& iterator);
+  void BuildCreateLiteral(const Operator* op);
+  void BuildCreateRegExpLiteral();
+  void BuildCreateArrayLiteral();
+  void BuildCreateObjectLiteral();
+  void BuildCreateArguments(CreateArgumentsType type);
+  void BuildLoadGlobal(TypeofMode typeof_mode);
+  void BuildStoreGlobal(LanguageMode language_mode);
+  void BuildNamedLoad();
+  void BuildKeyedLoad();
+  void BuildNamedStore(LanguageMode language_mode);
+  void BuildKeyedStore(LanguageMode language_mode);
+  void BuildLdaLookupSlot(TypeofMode typeof_mode);
+  void BuildStaLookupSlot(LanguageMode language_mode);
+  void BuildCall(TailCallMode tail_call_mode);
+  void BuildCallJSRuntime();
+  void BuildCallRuntime();
+  void BuildCallRuntimeForPair();
+  void BuildCallConstruct();
+  void BuildThrow();
+  void BuildBinaryOp(const Operator* op);
+  void BuildCompareOp(const Operator* op);
+  void BuildDelete(LanguageMode language_mode);
+  void BuildCastOperator(const Operator* op);
+  void BuildForInPrepare();
+  void BuildForInNext();
 
   // Control flow plumbing.
-  void BuildJump(int source_offset, int target_offset);
   void BuildJump();
   void BuildConditionalJump(Node* condition);
   void BuildJumpIfEqual(Node* comperand);
   void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+  void BuildJumpIfNotHole();
 
-  // Constructing merge and loop headers.
-  void MergeEnvironmentsOfBackwardBranches(int source_offset,
-                                           int target_offset);
-  void MergeEnvironmentsOfForwardBranches(int source_offset);
-  void BuildLoopHeaderForBackwardBranches(int source_offset);
+  // Simulates control flow by forward-propagating environments.
+  void MergeIntoSuccessorEnvironment(int target_offset);
+  void BuildLoopHeaderEnvironment(int current_offset);
+  void SwitchToMergeEnvironment(int current_offset);
 
-  // Attaches a frame state to |node| for the entry to the function.
-  void PrepareEntryFrameState(Node* node);
+  // Simulates control flow that exits the function body.
+  void MergeControlToLeaveFunction(Node* exit);
+
+  // Simulates entry and exit of exception handlers.
+  void EnterAndExitExceptionHandlers(int current_offset);
 
   // Growth increment for the temporary buffer used to construct input lists to
   // new nodes.
   static const int kInputBufferSizeIncrement = 64;
 
+  // The catch prediction from the handler table is reused.
+  typedef HandlerTable::CatchPrediction CatchPrediction;
+
+  // An abstract representation for an exception handler that is being
+  // entered and exited while the graph builder is iterating over the
+  // underlying bytecode. The exception handlers within the bytecode are
+  // well scoped, hence will form a stack during iteration.
+  struct ExceptionHandler {
+    int start_offset_;      // Start offset of the handled area in the bytecode.
+    int end_offset_;        // End offset of the handled area in the bytecode.
+    int handler_offset_;    // Handler entry offset within the bytecode.
+    int context_register_;  // Index of register holding handler context.
+    CatchPrediction pred_;  // Prediction of whether handler is catching.
+  };
+
   // Field accessors
+  Graph* graph() const { return jsgraph_->graph(); }
   CommonOperatorBuilder* common() const { return jsgraph_->common(); }
   Zone* graph_zone() const { return graph()->zone(); }
-  CompilationInfo* info() const { return info_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
   Zone* local_zone() const { return local_zone_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
   }
+  const Handle<HandlerTable>& exception_handler_table() const {
+    return exception_handler_table_;
+  }
+  const Handle<TypeFeedbackVector>& feedback_vector() const {
+    return feedback_vector_;
+  }
   const FrameStateFunctionInfo* frame_state_function_info() const {
     return frame_state_function_info_;
   }
 
-  LanguageMode language_mode() const {
-    // TODO(mythria): Don't rely on parse information to get language mode.
-    return info()->language_mode();
-  }
-
-  const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
-    return bytecode_iterator_;
+  const interpreter::BytecodeArrayIterator& bytecode_iterator() const {
+    return *bytecode_iterator_;
   }
 
   void set_bytecode_iterator(
@@ -211,28 +210,32 @@
     branch_analysis_ = branch_analysis;
   }
 
-#define DECLARE_VISIT_BYTECODE(name, ...) \
-  void Visit##name(const interpreter::BytecodeArrayIterator& iterator);
+#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
 
   Zone* local_zone_;
-  CompilationInfo* info_;
   JSGraph* jsgraph_;
   Handle<BytecodeArray> bytecode_array_;
+  Handle<HandlerTable> exception_handler_table_;
+  Handle<TypeFeedbackVector> feedback_vector_;
   const FrameStateFunctionInfo* frame_state_function_info_;
   const interpreter::BytecodeArrayIterator* bytecode_iterator_;
   const BytecodeBranchAnalysis* branch_analysis_;
   Environment* environment_;
 
+  // Indicates whether deoptimization support is enabled for this compilation
+  // and whether valid frame states need to be attached to deoptimizing nodes.
+  bool deoptimization_enabled_;
 
-  // Merge environments are snapshots of the environment at a particular
-  // bytecode offset to be merged into a later environment.
+  // Merge environments are snapshots of the environment at points where the
+  // control flow merges. This models a forward data flow propagation of all
+  // values from all predecessors of the merge in question.
   ZoneMap<int, Environment*> merge_environments_;
 
-  // Loop header environments are environments created for bytecodes
-  // where it is known there are back branches, ie a loop header.
-  ZoneMap<int, Environment*> loop_header_environments_;
+  // Exception handlers currently entered by the iteration.
+  ZoneStack<ExceptionHandler> exception_handlers_;
+  int current_exception_handler_;
 
   // Temporary storage for building node input lists.
   int input_buffer_size_;
@@ -243,100 +246,12 @@
   SetOncePointer<Node> function_closure_;
   SetOncePointer<Node> new_target_;
 
-  // Optimization to cache loaded feedback vector.
-  SetOncePointer<Node> feedback_vector_;
-
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
 };
 
-
-class BytecodeGraphBuilder::Environment : public ZoneObject {
- public:
-  Environment(BytecodeGraphBuilder* builder, int register_count,
-              int parameter_count, Node* control_dependency, Node* context);
-
-  int parameter_count() const { return parameter_count_; }
-  int register_count() const { return register_count_; }
-
-  Node* LookupAccumulator() const;
-  Node* LookupRegister(interpreter::Register the_register) const;
-
-  void ExchangeRegisters(interpreter::Register reg0,
-                         interpreter::Register reg1);
-
-  void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
-  void BindRegister(interpreter::Register the_register, Node* node,
-                    FrameStateBeforeAndAfter* states = nullptr);
-  void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
-                                  FrameStateBeforeAndAfter* states = nullptr);
-  void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
-
-  bool IsMarkedAsUnreachable() const;
-  void MarkAsUnreachable();
-
-  // Effect dependency tracked by this environment.
-  Node* GetEffectDependency() { return effect_dependency_; }
-  void UpdateEffectDependency(Node* dependency) {
-    effect_dependency_ = dependency;
-  }
-
-  // Preserve a checkpoint of the environment for the IR graph. Any
-  // further mutation of the environment will not affect checkpoints.
-  Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
-
-  // Returns true if the state values are up to date with the current
-  // environment.
-  bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
-
-  // Control dependency tracked by this environment.
-  Node* GetControlDependency() const { return control_dependency_; }
-  void UpdateControlDependency(Node* dependency) {
-    control_dependency_ = dependency;
-  }
-
-  Node* Context() const { return context_; }
-  void SetContext(Node* new_context) { context_ = new_context; }
-
-  Environment* CopyForConditional() const;
-  Environment* CopyForLoop();
-  void Merge(Environment* other);
-
- private:
-  explicit Environment(const Environment* copy);
-  void PrepareForLoop();
-  bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
-                              int output_poke_start, int output_poke_end);
-  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
-  void UpdateStateValues(Node** state_values, int offset, int count);
-
-  int RegisterToValuesIndex(interpreter::Register the_register) const;
-
-  Zone* zone() const { return builder_->local_zone(); }
-  Graph* graph() const { return builder_->graph(); }
-  CommonOperatorBuilder* common() const { return builder_->common(); }
-  BytecodeGraphBuilder* builder() const { return builder_; }
-  const NodeVector* values() const { return &values_; }
-  NodeVector* values() { return &values_; }
-  int register_base() const { return register_base_; }
-  int accumulator_base() const { return accumulator_base_; }
-
-  BytecodeGraphBuilder* builder_;
-  int register_count_;
-  int parameter_count_;
-  Node* context_;
-  Node* control_dependency_;
-  Node* effect_dependency_;
-  NodeVector values_;
-  Node* parameters_state_values_;
-  Node* registers_state_values_;
-  Node* accumulator_state_values_;
-  int register_base_;
-  int accumulator_base_;
-};
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
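
The merge_environments_ comment above describes a forward data-flow
propagation. A toy standalone model of MergeIntoSuccessorEnvironment's
behavior (plain string values instead of graph nodes; not the real
Environment class): the first branch to reach a target deposits its
environment, and later branches merge into it.

    #include <map>
    #include <string>

    using Env = std::map<std::string, std::string>;  // register name -> value

    void MergeIntoSuccessor(std::map<int, Env>& merge_envs, int target,
                            const Env& env) {
      auto it = merge_envs.find(target);
      if (it == merge_envs.end()) {
        merge_envs[target] = env;  // First predecessor: deposit a snapshot.
        return;
      }
      for (auto& entry : it->second) {
        auto other = env.find(entry.first);
        if (other == env.end() || other->second != entry.second) {
          entry.second = "phi(...)";  // Predecessors disagree: needs a phi.
        }
      }
    }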
diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc
index 44e0bf1..783d9d6 100644
--- a/src/compiler/c-linkage.cc
+++ b/src/compiler/c-linkage.cc
@@ -90,6 +90,7 @@
 // ===========================================================================
 // == mips ===================================================================
 // ===========================================================================
+#define STACK_SHADOW_WORDS 4
 #define PARAM_REGISTERS a0, a1, a2, a3
 #define CALLEE_SAVE_REGISTERS                                                  \
   s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
@@ -133,23 +134,22 @@
 
 // General code uses the above configuration data.
 CallDescriptor* Linkage::GetSimplifiedCDescriptor(
-    Zone* zone, const MachineSignature* msig) {
+    Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
   LocationSignature::Builder locations(zone, msig->return_count(),
                                        msig->parameter_count());
-#if 0  // TODO(titzer): instruction selector tests break here.
   // Check the types of the signature.
   // Currently no floating point parameters or returns are allowed because
   // on x87 and ia32, the FP top of stack is involved.
-
   for (size_t i = 0; i < msig->return_count(); i++) {
-    MachineType type = RepresentationOf(msig->GetReturn(i));
-    CHECK(type != kRepFloat32 && type != kRepFloat64);
+    MachineRepresentation rep = msig->GetReturn(i).representation();
+    CHECK_NE(MachineRepresentation::kFloat32, rep);
+    CHECK_NE(MachineRepresentation::kFloat64, rep);
   }
   for (size_t i = 0; i < msig->parameter_count(); i++) {
-    MachineType type = RepresentationOf(msig->GetParam(i));
-    CHECK(type != kRepFloat32 && type != kRepFloat64);
+    MachineRepresentation rep = msig->GetParam(i).representation();
+    CHECK_NE(MachineRepresentation::kFloat32, rep);
+    CHECK_NE(MachineRepresentation::kFloat64, rep);
   }
-#endif
 
 #ifdef UNSUPPORTED_C_LINKAGE
   // This method should not be called on unknown architectures.
@@ -220,7 +220,9 @@
       Operator::kNoProperties,       // properties
       kCalleeSaveRegisters,          // callee-saved registers
       kCalleeSaveFPRegisters,        // callee-saved fp regs
-      CallDescriptor::kNoFlags,      // flags
+      set_initialize_root_flag ?     // flags
+          CallDescriptor::kInitializeRootRegister
+                               : CallDescriptor::kNoFlags,
       "c-call");
 }
 
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index f791db1..e217f37 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -49,6 +49,12 @@
       return StoreElement(node);
     case IrOpcode::kAllocate:
       return Allocate(node);
+    case IrOpcode::kObjectIsReceiver:
+      return ObjectIsReceiver(node);
+    case IrOpcode::kObjectIsSmi:
+      return ObjectIsSmi(node);
+    case IrOpcode::kObjectIsNumber:
+      return ObjectIsNumber(node);
     default:
       return NoChange();
   }
@@ -582,6 +588,76 @@
   return Changed(node);
 }
 
+Node* ChangeLowering::IsSmi(Node* value) {
+  return graph()->NewNode(
+      machine()->WordEqual(),
+      graph()->NewNode(machine()->WordAnd(), value,
+                       jsgraph()->IntPtrConstant(kSmiTagMask)),
+      jsgraph()->IntPtrConstant(kSmiTag));
+}
+
+Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), object,
+      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+      graph()->start(), control);
+}
+
+Node* ChangeLowering::LoadMapInstanceType(Node* map) {
+  return graph()->NewNode(
+      machine()->Load(MachineType::Uint8()), map,
+      jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
+      graph()->start(), graph()->start());
+}
+
+Reduction ChangeLowering::ObjectIsNumber(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(1);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse = graph()->NewNode(
+      machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
+      jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
+  Node* input = NodeProperties::GetValueInput(node, 0);
+  // TODO(bmeurer): Optimize somewhat based on input type.
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* check = IsSmi(input);
+  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* vfalse =
+      graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+                       jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+                       LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
+  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  node->ReplaceInput(0, vtrue);
+  node->AppendInput(graph()->zone(), vfalse);
+  node->AppendInput(graph()->zone(), control);
+  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+  return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsSmi(Node* node) {
+  node->ReplaceInput(0,
+                     graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+                                      jsgraph()->IntPtrConstant(kSmiTagMask)));
+  node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+  NodeProperties::ChangeOp(node, machine()->WordEqual());
+  return Changed(node);
+}
 
 Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
 
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
index 6d60776..defadd9 100644
--- a/src/compiler/change-lowering.h
+++ b/src/compiler/change-lowering.h
@@ -56,6 +56,14 @@
   Reduction StoreElement(Node* node);
   Reduction Allocate(Node* node);
 
+  Node* IsSmi(Node* value);
+  Node* LoadHeapObjectMap(Node* object, Node* control);
+  Node* LoadMapInstanceType(Node* map);
+
+  Reduction ObjectIsNumber(Node* node);
+  Reduction ObjectIsReceiver(Node* node);
+  Reduction ObjectIsSmi(Node* node);
+
   Node* ComputeIndex(const ElementAccess& access, Node* const key);
   Graph* graph() const;
   Isolate* isolate() const;
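
The ObjectIsSmi lowering above reduces to a single mask-and-compare. As a
worked example under 32-bit-style Smi tagging (kSmiTag == 0,
kSmiTagMask == 1, Smi values shifted left by one bit):

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTag = 0;
    constexpr intptr_t kSmiTagMask = 1;

    // ObjectIsSmi(x) lowers to WordEqual(WordAnd(x, kSmiTagMask), kSmiTag).
    bool ObjectIsSmi(intptr_t tagged_value) {
      return (tagged_value & kSmiTagMask) == kSmiTag;
    }

    int main() {
      assert(ObjectIsSmi(42 << 1));  // Smi 42: low tag bit is clear.
      assert(!ObjectIsSmi(0x1001));  // Heap object pointer: low bit is set.
    }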
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 313567e..712cfe0 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -78,10 +78,12 @@
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm());
   }
-
   // Architecture-specific, linkage-specific prologue.
   info->set_prologue_offset(masm()->pc_offset());
   AssemblePrologue();
+  if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+    masm()->InitializeRootRegister();
+  }
 
   // Define deoptimization literals for all inlined functions.
   DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -175,12 +177,12 @@
     }
   }
 
-  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+  safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
 
   Handle<Code> result =
       v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
   result->set_is_turbofanned(true);
-  result->set_stack_slots(frame()->GetSpillSlotCount());
+  result->set_stack_slots(frame()->GetTotalFrameSlotCount());
   result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
 
   // Emit exception handler table.
@@ -234,9 +236,12 @@
     if (operand.IsStackSlot()) {
       int index = LocationOperand::cast(operand).index();
       DCHECK(index >= 0);
-      // Safepoint table indices are 0-based from the beginning of the spill
-      // slot area, adjust appropriately.
-      index -= stackSlotToSpillSlotDelta;
+      // We might index values in the fixed part of the frame (i.e. the
+      // closure pointer or the context pointer); these are not spill slots
+      // and therefore don't work with the SafepointTable currently, but
+      // we also don't need to worry about them, since the GC has special
+      // knowledge about those fields anyway.
+      if (index < stackSlotToSpillSlotDelta) continue;
       safepoint.DefinePointerSlot(index, zone());
     } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
       Register reg = LocationOperand::cast(operand).GetRegister();
@@ -583,7 +588,7 @@
     case FrameStateType::kInterpretedFunction:
       translation->BeginInterpretedFrame(
           descriptor->bailout_id(), shared_info_id,
-          static_cast<unsigned int>(descriptor->locals_count()));
+          static_cast<unsigned int>(descriptor->locals_count() + 1));
       break;
     case FrameStateType::kArgumentsAdaptor:
       translation->BeginArgumentsAdaptorFrame(
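
A standalone sketch of the safepoint-recording rule changed above: slot
indices below stackSlotToSpillSlotDelta address the fixed part of the frame
(for example the closure and context pointers) and are now skipped rather
than rebased, since the GC visits those fields through its own knowledge of
the frame (toy code; the commented call stands in for the real
SafepointTable API):

    #include <vector>

    void RecordPointerSlots(const std::vector<int>& stack_slot_indices,
                            int stack_slot_to_spill_slot_delta) {
      for (int index : stack_slot_indices) {
        // Fixed-frame slots are not spill slots; leave them to the GC.
        if (index < stack_slot_to_spill_slot_delta) continue;
        // safepoint.DefinePointerSlot(index, zone());
      }
    }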
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
index b2a05b6..45f47d3 100644
--- a/src/compiler/code-stub-assembler.cc
+++ b/src/compiler/code-stub-assembler.cc
@@ -24,28 +24,33 @@
 namespace internal {
 namespace compiler {
 
-
 CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
                                      const CallInterfaceDescriptor& descriptor,
-                                     Code::Kind kind, const char* name)
+                                     Code::Flags flags, const char* name,
+                                     size_t result_size)
     : raw_assembler_(new RawMachineAssembler(
           isolate, new (zone) Graph(zone),
-          Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
-                                         CallDescriptor::kNoFlags))),
-      kind_(kind),
+          Linkage::GetStubCallDescriptor(
+              isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+              CallDescriptor::kNoFlags, Operator::kNoProperties,
+              MachineType::AnyTagged(), result_size))),
+      flags_(flags),
       name_(name),
-      code_generated_(false) {}
-
+      code_generated_(false),
+      variables_(zone) {}
 
 CodeStubAssembler::~CodeStubAssembler() {}
 
+void CodeStubAssembler::CallPrologue() {}
+
+void CodeStubAssembler::CallEpilogue() {}
 
 Handle<Code> CodeStubAssembler::GenerateCode() {
   DCHECK(!code_generated_);
 
   Schedule* schedule = raw_assembler_->Export();
   Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+      isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
       name_);
 
   code_generated_ = true;
@@ -77,6 +82,9 @@
   return raw_assembler_->BooleanConstant(value);
 }
 
+Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
+  return raw_assembler_->ExternalConstant(address);
+}
 
 Node* CodeStubAssembler::Parameter(int value) {
   return raw_assembler_->Parameter(value);
@@ -87,6 +95,21 @@
   return raw_assembler_->Return(value);
 }
 
+void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
+  return label->Bind();
+}
+
+Node* CodeStubAssembler::LoadFramePointer() {
+  return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeStubAssembler::LoadParentFramePointer() {
+  return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeStubAssembler::LoadStackPointer() {
+  return raw_assembler_->LoadStackPointer();
+}
 
 Node* CodeStubAssembler::SmiShiftBitsConstant() {
   return Int32Constant(kSmiShiftSize + kSmiTagSize);
@@ -102,31 +125,117 @@
   return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
 }
 
+#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name)   \
+  Node* CodeStubAssembler::name(Node* a, Node* b) { \
+    return raw_assembler_->name(a, b);              \
+  }
+CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
 
-Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
-  return raw_assembler_->IntPtrAdd(a, b);
+Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
+  return raw_assembler_->ChangeInt32ToInt64(value);
 }
 
-
-Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
-  return raw_assembler_->IntPtrSub(a, b);
-}
-
-
 Node* CodeStubAssembler::WordShl(Node* value, int shift) {
   return raw_assembler_->WordShl(value, Int32Constant(shift));
 }
 
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
+  return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
+                   Int32Constant(0));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
+  return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
+                              IntPtrConstant(offset));
+}
 
 Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
   return raw_assembler_->Load(MachineType::AnyTagged(), object,
                               IntPtrConstant(offset - kHeapObjectTag));
 }
 
+Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
+                                                       Node* smi_index,
+                                                       int additional_offset) {
+  Node* header_size = raw_assembler_->Int32Constant(
+      additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+  Node* scaled_index =
+      (kSmiShiftSize == 0)
+          ? raw_assembler_->Word32Shl(
+                smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
+          : raw_assembler_->Word32Shl(SmiUntag(smi_index),
+                                      Int32Constant(kPointerSizeLog2));
+  Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
+  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
+                                                            int index) {
+  Node* offset = raw_assembler_->Int32Constant(
+      FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+  return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
+  if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+    Handle<Object> root = isolate()->heap()->root_handle(root_index);
+    if (root->IsSmi()) {
+      return Int32Constant(Handle<Smi>::cast(root)->value());
+    } else {
+      return HeapConstant(Handle<HeapObject>::cast(root));
+    }
+  }
+
+  compiler::Node* roots_array_start =
+      ExternalConstant(ExternalReference::roots_array_start(isolate()));
+  USE(roots_array_start);
+
+  // TODO(danno): Implement the root-access case where the root is not constant
+  // and must be loaded from the root array.
+  UNIMPLEMENTED();
+  return nullptr;
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
+  return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
+  return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+                               Node* value) {
+  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+                               Node* index, Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+                                             Node* base, Node* value) {
+  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+                                             Node* base, Node* index,
+                                             Node* value) {
+  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::Projection(int index, Node* value) {
+  return raw_assembler_->Projection(index, value);
+}
 
 Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
                                Node** args) {
-  return raw_assembler_->CallN(descriptor, code_target, args);
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+  CallEpilogue();
+  return return_value;
 }
 
 
@@ -135,41 +244,371 @@
   return raw_assembler_->TailCallN(descriptor, code_target, args);
 }
 
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+  CallEpilogue();
+  return return_value;
+}
 
 Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
                                      Node* context, Node* arg1) {
-  return raw_assembler_->CallRuntime1(function_id, arg1, context);
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+  CallEpilogue();
+  return return_value;
 }
 
-
 Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
                                      Node* context, Node* arg1, Node* arg2) {
-  return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+  CallEpilogue();
+  return return_value;
 }
 
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3) {
+  CallPrologue();
+  Node* return_value =
+      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+  CallEpilogue();
+  return return_value;
+}
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+                                     Node* context, Node* arg1, Node* arg2,
+                                     Node* arg3, Node* arg4) {
+  CallPrologue();
+  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+                                                    arg3, arg4, context);
+  CallEpilogue();
+  return return_value;
+}
 
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1) {
   return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
 }
 
-
 Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
                                          Node* context, Node* arg1,
                                          Node* arg2) {
   return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
 }
 
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context, Node* arg1, Node* arg2,
+                                         Node* arg3) {
+  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+                                          context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+                                         Node* context, Node* arg1, Node* arg2,
+                                         Node* arg3, Node* arg4) {
+  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+                                          context);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(2);
+  args[0] = arg1;
+  args[1] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(3);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(4);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(5);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+                                  Node* target, Node* context, Node* arg1,
+                                  Node* arg2, Node* arg3, Node* arg4,
+                                  Node* arg5, size_t result_size) {
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+      CallDescriptor::kNoFlags, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+
+  Node** args = zone()->NewArray<Node*>(6);
+  args[0] = arg1;
+  args[1] = arg2;
+  args[2] = arg3;
+  args[3] = arg4;
+  args[4] = arg5;
+  args[5] = context;
+
+  return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
+  Node* code_target = HeapConstant(stub.GetCode());
+  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+      stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
+  return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeStubAssembler::TailCall(
+    const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
+    Node** args, size_t result_size) {
+  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), interface_descriptor,
+      interface_descriptor.GetStackParameterCount(),
+      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+      MachineType::AnyTagged(), result_size);
+  return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
+  label->MergeVariables();
+  raw_assembler_->Goto(label->label_);
+}
+
+void CodeStubAssembler::Branch(Node* condition,
+                               CodeStubAssembler::Label* true_label,
+                               CodeStubAssembler::Label* false_label) {
+  true_label->MergeVariables();
+  false_label->MergeVariables();
+  return raw_assembler_->Branch(condition, true_label->label_,
+                                false_label->label_);
+}
+
+void CodeStubAssembler::Switch(Node* index, Label* default_label,
+                               int32_t* case_values, Label** case_labels,
+                               size_t case_count) {
+  RawMachineLabel** labels =
+      new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+          RawMachineLabel*[case_count];
+  for (size_t i = 0; i < case_count; ++i) {
+    labels[i] = case_labels[i]->label_;
+    case_labels[i]->MergeVariables();
+    default_label->MergeVariables();
+  }
+  return raw_assembler_->Switch(index, default_label->label_, case_values,
+                                labels, case_count);
+}
 
 // RawMachineAssembler delegate helpers:
 Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
 
-
 Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
 
-
 Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
 
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
+class CodeStubAssembler::Variable::Impl : public ZoneObject {
+ public:
+  explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+  Node* value_;
+  MachineRepresentation rep_;
+};
+
+CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
+                                      MachineRepresentation rep)
+    : impl_(new (assembler->zone()) Impl(rep)) {
+  assembler->variables_.push_back(impl_);
+}
+
+void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeStubAssembler::Variable::value() const {
+  DCHECK_NOT_NULL(impl_->value_);
+  return impl_->value_;
+}
+
+MachineRepresentation CodeStubAssembler::Variable::rep() const {
+  return impl_->rep_;
+}
+
+bool CodeStubAssembler::Variable::IsBound() const {
+  return impl_->value_ != nullptr;
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
+    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+  label_ = new (buffer) RawMachineLabel();
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+                                int merged_value_count,
+                                CodeStubAssembler::Variable** merged_variables)
+    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+  void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+  label_ = new (buffer) RawMachineLabel();
+  for (int i = 0; i < merged_value_count; ++i) {
+    variable_phis_[merged_variables[i]->impl_] = nullptr;
+  }
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+                                CodeStubAssembler::Variable* merged_variable)
+    : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
+
+void CodeStubAssembler::Label::MergeVariables() {
+  ++merge_count_;
+  for (auto var : assembler_->variables_) {
+    size_t count = 0;
+    Node* node = var->value_;
+    if (node != nullptr) {
+      auto i = variable_merges_.find(var);
+      if (i != variable_merges_.end()) {
+        i->second.push_back(node);
+        count = i->second.size();
+      } else {
+        count = 1;
+        variable_merges_[var] = std::vector<Node*>(1, node);
+      }
+    }
+    // If the following assert fires, then you've jumped to this label along
+    // a path that leaves unbound a variable the label expects to merge into
+    // a phi.
+    DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+           count == merge_count_);
+    USE(count);
+
+    // If the label is already bound, we already know the set of variables to
+    // merge and phi nodes have already been created.
+    if (bound_) {
+      auto phi = variable_phis_.find(var);
+      if (phi != variable_phis_.end()) {
+        DCHECK_NOT_NULL(phi->second);
+        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+      } else {
+        auto i = variable_merges_.find(var);
+        USE(i);
+        // If the following assert fires, then you've declared a variable that
+        // has the same bound value along all paths up until the point you bound
+        // this label, but then later merged a path with a new value for the
+        // variable after the label bind (it's not possible to add phis to the
+        // bound label after the fact; instead, list the variable in
+        // the label's constructor's list of merged variables).
+        DCHECK(find_if(i->second.begin(), i->second.end(),
+                       [node](Node* e) -> bool { return node != e; }) ==
+               i->second.end());
+      }
+    }
+  }
+}
+
+void CodeStubAssembler::Label::Bind() {
+  DCHECK(!bound_);
+  assembler_->raw_assembler_->Bind(label_);
+
+  // Make sure that all variables that have changed along any path up to this
+  // point are marked as merge variables.
+  for (auto var : assembler_->variables_) {
+    Node* shared_value = nullptr;
+    auto i = variable_merges_.find(var);
+    if (i != variable_merges_.end()) {
+      for (auto value : i->second) {
+        DCHECK(value != nullptr);
+        if (value != shared_value) {
+          if (shared_value == nullptr) {
+            shared_value = value;
+          } else {
+            variable_phis_[var] = nullptr;
+          }
+        }
+      }
+    }
+  }
+
+  for (auto var : variable_phis_) {
+    CodeStubAssembler::Variable::Impl* var_impl = var.first;
+    auto i = variable_merges_.find(var_impl);
+    // If the following assert fires, then a variable that has been marked as
+    // being merged at the label--either by explicitly marking it so in the
+    // label constructor or by having seen different bound values at branches
+    // into the label--doesn't have a bound value along all of the paths that
+    // have been merged into the label up to this point.
+    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+    Node* phi = assembler_->raw_assembler_->Phi(
+        var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+    variable_phis_[var_impl] = phi;
+  }
+
+  // Bind each variable to its merge phi, to the common value along all
+  // paths, or to null.
+  for (auto var : assembler_->variables_) {
+    auto i = variable_phis_.find(var);
+    if (i != variable_phis_.end()) {
+      var->value_ = i->second;
+    } else {
+      auto j = variable_merges_.find(var);
+      if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+        var->value_ = j->second.back();
+      } else {
+        var->value_ = nullptr;
+      }
+    }
+  }
+
+  bound_ = true;
+}
 
 }  // namespace compiler
 }  // namespace internal
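
A hypothetical fragment showing how the Variable/Label machinery defined
above is meant to be used (sketch only; assumes a CodeStubAssembler* named
`assembler` with one tagged parameter in scope):

    // `done` lists `result`, so the two bindings below get merged into a
    // phi when the label is bound.
    CodeStubAssembler::Variable result(assembler,
                                       MachineRepresentation::kTagged);
    CodeStubAssembler::Label if_smi(assembler), if_not_smi(assembler),
        done(assembler, &result);

    Node* value = assembler->Parameter(0);
    assembler->Branch(assembler->WordIsSmi(value), &if_smi, &if_not_smi);

    assembler->Bind(&if_smi);
    result.Bind(value);  // Already a Smi: pass it through unchanged.
    assembler->Goto(&done);

    assembler->Bind(&if_not_smi);
    result.Bind(assembler->NumberConstant(0));  // Fall back to the number 0.
    assembler->Goto(&done);

    assembler->Bind(&done);
    assembler->Return(result.value());  // The phi of the two bound values.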
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
index 3c4ae05..2ab1376 100644
--- a/src/compiler/code-stub-assembler.h
+++ b/src/compiler/code-stub-assembler.h
@@ -5,11 +5,16 @@
 #ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
 #define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
 
+#include <map>
+
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
 #include "src/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
 #include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -25,48 +30,196 @@
 class Node;
 class Operator;
 class RawMachineAssembler;
+class RawMachineLabel;
 class Schedule;
 
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
+  V(IntPtrAdd)                                \
+  V(IntPtrSub)                                \
+  V(Int32Add)                                 \
+  V(Int32Sub)                                 \
+  V(Int32Mul)                                 \
+  V(Int32GreaterThanOrEqual)                  \
+  V(WordEqual)                                \
+  V(WordNotEqual)                             \
+  V(WordOr)                                   \
+  V(WordAnd)                                  \
+  V(WordXor)                                  \
+  V(WordShl)                                  \
+  V(WordShr)                                  \
+  V(WordSar)                                  \
+  V(WordRor)                                  \
+  V(Word32Equal)                              \
+  V(Word32NotEqual)                           \
+  V(Word32Or)                                 \
+  V(Word32And)                                \
+  V(Word32Xor)                                \
+  V(Word32Shl)                                \
+  V(Word32Shr)                                \
+  V(Word32Sar)                                \
+  V(Word32Ror)                                \
+  V(Word64Equal)                              \
+  V(Word64NotEqual)                           \
+  V(Word64Or)                                 \
+  V(Word64And)                                \
+  V(Word64Xor)                                \
+  V(Word64Shr)                                \
+  V(Word64Sar)                                \
+  V(Word64Ror)                                \
+  V(UintPtrGreaterThanOrEqual)
+
 class CodeStubAssembler {
  public:
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
   CodeStubAssembler(Isolate* isolate, Zone* zone,
-                    const CallInterfaceDescriptor& descriptor, Code::Kind kind,
-                    const char* name);
+                    const CallInterfaceDescriptor& descriptor,
+                    Code::Flags flags, const char* name,
+                    size_t result_size = 1);
   virtual ~CodeStubAssembler();
 
   Handle<Code> GenerateCode();
 
+  class Label;
+  class Variable {
+   public:
+    explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
+    void Bind(Node* value);
+    Node* value() const;
+    MachineRepresentation rep() const;
+    bool IsBound() const;
+
+   private:
+    friend class CodeStubAssembler;
+    class Impl;
+    Impl* impl_;
+  };
+
+  // ===========================================================================
+  // Base Assembler
+  // ===========================================================================
+
   // Constants.
   Node* Int32Constant(int value);
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
   Node* HeapConstant(Handle<HeapObject> object);
   Node* BooleanConstant(bool value);
+  Node* ExternalConstant(ExternalReference address);
 
   Node* Parameter(int value);
   void Return(Node* value);
 
-  // Tag and untag Smi values.
-  Node* SmiTag(Node* value);
-  Node* SmiUntag(Node* value);
+  void Bind(Label* label);
+  void Goto(Label* label);
+  void Branch(Node* condition, Label* true_label, Label* false_label);
 
-  // Basic arithmetic operations.
-  Node* IntPtrAdd(Node* a, Node* b);
-  Node* IntPtrSub(Node* a, Node* b);
+  void Switch(Node* index, Label* default_label, int32_t* case_values,
+              Label** case_labels, size_t case_count);
+
+  // Access to the frame pointer
+  Node* LoadFramePointer();
+  Node* LoadParentFramePointer();
+
+  // Access to the stack pointer
+  Node* LoadStackPointer();
+
+  // Load raw memory location.
+  Node* Load(MachineType rep, Node* base);
+  Node* Load(MachineType rep, Node* base, Node* index);
+
+  // Store value to raw memory location.
+  Node* Store(MachineRepresentation rep, Node* base, Node* value);
+  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+                            Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
+  CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
+
   Node* WordShl(Node* value, int shift);
 
-  // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
+  // Conversions
+  Node* ChangeInt32ToInt64(Node* value);
 
-  // Call runtime function.
+  // Projections
+  Node* Projection(int index, Node* value);
+
+  // Calls
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
   Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
                     Node* arg2);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4);
+  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
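+  // Illustrative call via the zero-argument overload above:
+  //   Node* result = CallRuntime(Runtime::kStackGuard, context);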
 
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1);
   Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
                         Node* arg1, Node* arg2);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3);
+  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 size_t result_size = 1);
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+                 Node* arg5, size_t result_size = 1);
+
+  Node* TailCallStub(CodeStub& stub, Node** args);
+  Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node** args, size_t result_size = 1);
+
+  // ===========================================================================
+  // Macros
+  // ===========================================================================
+
+  // Tag and untag Smi values.
+  Node* SmiTag(Node* value);
+  Node* SmiUntag(Node* value);
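+  // Sketch of the encoding: a Smi is the integer shifted left by
+  // SmiShiftBitsConstant(), so SmiTag is a left shift and SmiUntag the
+  // matching arithmetic right shift (the shift width is platform-dependent).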
+
+  // Load a value from the root array.
+  Node* LoadRoot(Heap::RootListIndex root_index);
+
+  // Check a value for smi-ness.
+  Node* WordIsSmi(Node* a);
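+  // With V8's Smi encoding (tag bit 0 is clear), this amounts to testing
+  // (value & kSmiTagMask) == 0.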
+
+  // Load an object pointer from a buffer that isn't in the heap.
+  Node* LoadBufferObject(Node* buffer, int offset);
+  // Load a field from an object on the heap.
+  Node* LoadObjectField(Node* object, int offset);
+
+  // Load an array element from a FixedArray.
+  Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
+                                      int additional_offset = 0);
+  Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
+
+ protected:
+  // Protected helpers which delegate to RawMachineAssembler.
+  Graph* graph();
+  Isolate* isolate();
+  Zone* zone();
+
+  // Enables subclasses to perform operations before and after a call.
+  virtual void CallPrologue();
+  virtual void CallEpilogue();
 
  private:
   friend class CodeStubAssemblerTester;
@@ -76,19 +229,42 @@
 
   Node* SmiShiftBitsConstant();
 
-  // Private helpers which delegate to RawMachineAssembler.
-  Graph* graph();
-  Isolate* isolate();
-  Zone* zone();
-
   base::SmartPointer<RawMachineAssembler> raw_assembler_;
-  Code::Kind kind_;
+  Code::Flags flags_;
   const char* name_;
   bool code_generated_;
+  ZoneVector<Variable::Impl*> variables_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
 };
 
+class CodeStubAssembler::Label {
+ public:
+  explicit Label(CodeStubAssembler* assembler);
+  Label(CodeStubAssembler* assembler, int merged_variable_count,
+        CodeStubAssembler::Variable** merged_variables);
+  Label(CodeStubAssembler* assembler,
+        CodeStubAssembler::Variable* merged_variable);
+  ~Label() {}
+
+ private:
+  friend class CodeStubAssembler;
+
+  void Bind();
+  void MergeVariables();
+
+  bool bound_;
+  size_t merge_count_;
+  CodeStubAssembler* assembler_;
+  RawMachineLabel* label_;
+  // Map of variables that need to be merged to their phi nodes (or placeholders
+  // for those phis).
+  std::map<Variable::Impl*, Node*> variable_phis_;
+  // Map of variables to the list of value nodes that have been added from each
+  // merge path in their order of merging.
+  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index be77309..c92bae9 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -803,11 +803,6 @@
 }
 
 
-const Operator* CommonOperatorBuilder::LazyBailout() {
-  return Call(Linkage::GetLazyBailoutDescriptor(zone()));
-}
-
-
 const Operator* CommonOperatorBuilder::TailCall(
     const CallDescriptor* descriptor) {
   class TailCallOperator final : public Operator1<const CallDescriptor*> {
@@ -866,11 +861,9 @@
 const FrameStateFunctionInfo*
 CommonOperatorBuilder::CreateFrameStateFunctionInfo(
     FrameStateType type, int parameter_count, int local_count,
-    Handle<SharedFunctionInfo> shared_info,
-    ContextCallingMode context_calling_mode) {
+    Handle<SharedFunctionInfo> shared_info) {
   return new (zone()->New(sizeof(FrameStateFunctionInfo)))
-      FrameStateFunctionInfo(type, parameter_count, local_count, shared_info,
-                             context_calling_mode);
+      FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
 }
 
 }  // namespace compiler
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 83cb5b2..7c3f3da 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -14,11 +14,7 @@
 
 // Forward declarations.
 class ExternalReference;
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
-
+class Type;
 
 namespace compiler {
 
@@ -174,7 +170,6 @@
   const Operator* Call(const CallDescriptor* descriptor);
   const Operator* TailCall(const CallDescriptor* descriptor);
   const Operator* Projection(size_t index);
-  const Operator* LazyBailout();
 
   // Constructs a new merge or phi operator with the same opcode as {op}, but
   // with {size} inputs.
@@ -183,8 +178,7 @@
   // Constructs function info for frame state construction.
   const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
       FrameStateType type, int parameter_count, int local_count,
-      Handle<SharedFunctionInfo> shared_info,
-      ContextCallingMode context_calling_mode);
+      Handle<SharedFunctionInfo> shared_info);
 
  private:
   Zone* zone() const { return zone_; }
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index df8b65d..313b639 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -11,6 +11,15 @@
 namespace internal {
 namespace compiler {
 
+#ifdef DEBUG
+#define TRACE(...)                                    \
+  do {                                                \
+    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif  // DEBUG
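+// TRACE compiles away outside DEBUG builds, so call sites no longer need to
+// repeat the FLAG_trace_turbo_escape check.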
+
 EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
                                              EscapeAnalysis* escape_analysis,
                                              Zone* zone)
@@ -18,10 +27,16 @@
       jsgraph_(jsgraph),
       escape_analysis_(escape_analysis),
       zone_(zone),
-      visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+      fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
+      exists_virtual_allocate_(true) {}
 
 
 Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return NoChange();
+  }
+
   switch (node->opcode()) {
     case IrOpcode::kLoadField:
     case IrOpcode::kLoadElement:
@@ -37,11 +52,44 @@
       return ReduceReferenceEqual(node);
     case IrOpcode::kObjectIsSmi:
       return ReduceObjectIsSmi(node);
+    // FrameState and StateValues nodes are preprocessed here and then
+    // visited via ReduceFrameStateUses from their user nodes.
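+    // A node counts as fully reduced once none of its inputs can still refer
+    // to a virtual object, directly or through a nested FrameState or
+    // StateValues node.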
+    case IrOpcode::kFrameState:
+    case IrOpcode::kStateValues: {
+      if (node->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+          fully_reduced_.Contains(node->id())) {
+        break;
+      }
+      bool depends_on_object_state = false;
+      for (int i = 0; i < node->InputCount(); i++) {
+        Node* input = node->InputAt(i);
+        switch (input->opcode()) {
+          case IrOpcode::kAllocate:
+          case IrOpcode::kFinishRegion:
+            depends_on_object_state =
+                depends_on_object_state || escape_analysis()->IsVirtual(input);
+            break;
+          case IrOpcode::kFrameState:
+          case IrOpcode::kStateValues:
+            depends_on_object_state =
+                depends_on_object_state ||
+                input->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+                !fully_reduced_.Contains(input->id());
+            break;
+          default:
+            break;
+        }
+      }
+      if (!depends_on_object_state) {
+        fully_reduced_.Add(node->id());
+      }
+      return NoChange();
+    }
     default:
       // TODO(sigurds): Change this to GetFrameStateInputCount once
       // it is working. For now we use EffectInputCount > 0 to determine
       // whether a node might have a frame state input.
-      if (node->op()->EffectInputCount() > 0) {
+      if (exists_virtual_allocate_ && node->op()->EffectInputCount() > 0) {
         return ReduceFrameStateUses(node);
       }
       break;
@@ -53,17 +101,15 @@
 Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kLoadField ||
          node->opcode() == IrOpcode::kLoadElement);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (Node* rep = escape_analysis()->GetReplacement(node)) {
-    visited_.Add(node->id());
     counters()->turbo_escape_loads_replaced()->Increment();
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
-             node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
-    }
+    TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+          node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
     ReplaceWithValue(node, rep);
-    return Changed(rep);
+    return Replace(rep);
   }
   return NoChange();
 }
@@ -72,13 +118,12 @@
 Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
   DCHECK(node->opcode() == IrOpcode::kStoreField ||
          node->opcode() == IrOpcode::kStoreElement);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Removed #%d (%s) from effect chain\n", node->id(),
-             node->op()->mnemonic());
-    }
+    TRACE("Removed #%d (%s) from effect chain\n", node->id(),
+          node->op()->mnemonic());
     RelaxEffectsAndControls(node);
     return Changed(node);
   }
@@ -88,14 +133,13 @@
 
 Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   if (escape_analysis()->IsVirtual(node)) {
     RelaxEffectsAndControls(node);
     counters()->turbo_escape_allocs_replaced()->Increment();
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Removed allocate #%d from effect chain\n", node->id());
-    }
+    TRACE("Removed allocate #%d from effect chain\n", node->id());
     return Changed(node);
   }
   return NoChange();
@@ -106,8 +150,14 @@
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   Node* effect = NodeProperties::GetEffectInput(node, 0);
   if (effect->opcode() == IrOpcode::kBeginRegion) {
+    // Mark the node as fully reduced only now, so that empty Begin/Finish
+    // region pairs are removed in the process.
+    if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+      fully_reduced_.Add(node->id());
+    }
     RelaxEffectsAndControls(effect);
     RelaxEffectsAndControls(node);
+#ifdef DEBUG
     if (FLAG_trace_turbo_escape) {
       PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
              node->id());
@@ -117,6 +167,7 @@
       }
       PrintF("\n");
     }
+#endif  // DEBUG
     return Changed(node);
   }
   return NoChange();
@@ -131,22 +182,18 @@
     if (escape_analysis()->IsVirtual(right) &&
         escape_analysis()->CompareVirtualObjects(left, right)) {
       ReplaceWithValue(node, jsgraph()->TrueConstant());
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Replaced ref eq #%d with true\n", node->id());
-      }
+      TRACE("Replaced ref eq #%d with true\n", node->id());
+      return Replace(jsgraph()->TrueConstant());
     }
     // Right-hand side is not a virtual object, or a different one.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ref eq #%d with false\n", node->id());
-    }
-    return Replace(node);
+    TRACE("Replaced ref eq #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   } else if (escape_analysis()->IsVirtual(right)) {
     // Left-hand side is not a virtual object.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ref eq #%d with false\n", node->id());
-    }
+    TRACE("Replaced ref eq #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   }
   return NoChange();
 }
@@ -157,24 +204,23 @@
   Node* input = NodeProperties::GetValueInput(node, 0);
   if (escape_analysis()->IsVirtual(input)) {
     ReplaceWithValue(node, jsgraph()->FalseConstant());
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
-    }
-    return Replace(node);
+    TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
+    return Replace(jsgraph()->FalseConstant());
   }
   return NoChange();
 }
 
 
 Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
-  if (visited_.Contains(node->id())) return NoChange();
-  visited_.Add(node->id());
   DCHECK_GE(node->op()->EffectInputCount(), 1);
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   bool changed = false;
   for (int i = 0; i < node->InputCount(); ++i) {
     Node* input = node->InputAt(i);
     if (input->opcode() == IrOpcode::kFrameState) {
-      if (Node* ret = ReduceFrameState(input, node, false)) {
+      if (Node* ret = ReduceDeoptState(input, node, false)) {
         node->ReplaceInput(i, ret);
         changed = true;
       }
@@ -188,78 +234,56 @@
 
 
 // Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
                                               bool multiple_users) {
-  DCHECK(node->opcode() == IrOpcode::kFrameState);
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing FrameState %d\n", node->id());
+  DCHECK(node->opcode() == IrOpcode::kFrameState ||
+         node->opcode() == IrOpcode::kStateValues);
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return nullptr;
   }
+  TRACE("Reducing %s %d\n", node->op()->mnemonic(), node->id());
   Node* clone = nullptr;
+  bool node_multiused = node->UseCount() > 1;
+  bool multiple_users_rec = multiple_users || node_multiused;
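+  // Clone-on-write: a deopt state shared by several users (or reachable from
+  // one that is) is duplicated before its inputs are rewritten, so unrelated
+  // deopt points keep the original state.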
   for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
     Node* input = NodeProperties::GetValueInput(node, i);
-    Node* ret =
-        input->opcode() == IrOpcode::kStateValues
-            ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
-            : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
-    if (ret) {
-      if (node->UseCount() > 1 || multiple_users) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("  Cloning #%d", node->id());
-        }
-        node = clone = jsgraph()->graph()->CloneNode(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" to #%d\n", node->id());
-        }
-        multiple_users = false;  // Don't clone anymore.
-      }
-      NodeProperties::ReplaceValueInput(node, ret, i);
-    }
-  }
-  Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
-    if (Node* ret =
-            ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
-      if (node->UseCount() > 1 || multiple_users) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("  Cloning #%d", node->id());
-        }
-        node = clone = jsgraph()->graph()->CloneNode(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" to #%d\n", node->id());
-        }
-        multiple_users = false;
-      }
-      NodeProperties::ReplaceFrameStateInput(node, 0, ret);
-    }
-  }
-  return clone;
-}
-
-
-// Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
-                                                    bool multiple_users) {
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing StateValue #%d\n", node->id());
-  }
-  DCHECK(node->opcode() == IrOpcode::kStateValues);
-  DCHECK_NOT_NULL(effect);
-  Node* clone = nullptr;
-  for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-    Node* input = NodeProperties::GetValueInput(node, i);
-    Node* ret = nullptr;
     if (input->opcode() == IrOpcode::kStateValues) {
-      ret = ReduceStateValueInputs(input, effect, multiple_users);
+      if (Node* ret = ReduceDeoptState(input, effect, multiple_users_rec)) {
+        if (node_multiused || (multiple_users && !clone)) {
+          TRACE("  Cloning #%d", node->id());
+          node = clone = jsgraph()->graph()->CloneNode(node);
+          TRACE(" to #%d\n", node->id());
+          node_multiused = false;
+        }
+        NodeProperties::ReplaceValueInput(node, ret, i);
+      }
     } else {
-      ret = ReduceStateValueInput(node, i, effect, multiple_users);
+      if (Node* ret = ReduceStateValueInput(node, i, effect, node_multiused,
+                                            clone, multiple_users)) {
+        DCHECK_NULL(clone);
+        node_multiused = false;  // Don't clone anymore.
+        node = clone = ret;
+      }
     }
-    if (ret) {
-      node = ret;
-      DCHECK_NULL(clone);
-      clone = ret;
-      multiple_users = false;
+  }
+  if (node->opcode() == IrOpcode::kFrameState) {
+    Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+    if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+      if (Node* ret =
+              ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
+        if (node_multiused || (multiple_users && !clone)) {
+          TRACE("    Cloning #%d", node->id());
+          node = clone = jsgraph()->graph()->CloneNode(node);
+          TRACE(" to #%d\n", node->id());
+        }
+        NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+      }
     }
   }
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+    fully_reduced_.Add(node->id());
+  }
   return clone;
 }
 
@@ -267,36 +291,36 @@
 // Returns the clone if it duplicated the node, and null otherwise.
 Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
                                                    Node* effect,
+                                                   bool node_multiused,
+                                                   bool already_cloned,
                                                    bool multiple_users) {
   Node* input = NodeProperties::GetValueInput(node, node_index);
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Reducing State Input #%d (%s)\n", input->id(),
-           input->op()->mnemonic());
+  if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+      fully_reduced_.Contains(node->id())) {
+    return nullptr;
   }
+  TRACE("Reducing State Input #%d (%s)\n", input->id(),
+        input->op()->mnemonic());
   Node* clone = nullptr;
   if (input->opcode() == IrOpcode::kFinishRegion ||
       input->opcode() == IrOpcode::kAllocate) {
     if (escape_analysis()->IsVirtual(input)) {
       if (Node* object_state =
               escape_analysis()->GetOrCreateObjectState(effect, input)) {
-        if (node->UseCount() > 1 || multiple_users) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF("Cloning #%d", node->id());
-          }
+        if (node_multiused || (multiple_users && !already_cloned)) {
+          TRACE("Cloning #%d", node->id());
           node = clone = jsgraph()->graph()->CloneNode(node);
-          if (FLAG_trace_turbo_escape) {
-            PrintF(" to #%d\n", node->id());
-          }
+          TRACE(" to #%d\n", node->id());
+          node_multiused = false;
+          already_cloned = true;
         }
         NodeProperties::ReplaceValueInput(node, object_state, node_index);
-        if (FLAG_trace_turbo_escape) {
-          PrintF("Replaced state #%d input #%d with object state #%d\n",
-                 node->id(), input->id(), object_state->id());
-        }
+        TRACE("Replaced state #%d input #%d with object state #%d\n",
+              node->id(), input->id(), object_state->id());
       } else {
-        if (FLAG_trace_turbo_escape) {
-          PrintF("No object state replacement available.\n");
-        }
+        TRACE("No object state replacement for #%d at effect #%d available.\n",
+              input->id(), effect->id());
+        UNREACHABLE();
       }
     }
   }
@@ -308,6 +332,36 @@
   return jsgraph_->isolate()->counters();
 }
 
+
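+// Debug-only checker: once the reducer has run, any kAllocate still left in
+// the graph must be one the analysis did not classify as virtual.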
+class EscapeAnalysisVerifier final : public AdvancedReducer {
+ public:
+  EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
+      : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
+
+  Reduction Reduce(Node* node) final {
+    switch (node->opcode()) {
+      case IrOpcode::kAllocate:
+        CHECK(!escape_analysis_->IsVirtual(node));
+        break;
+      default:
+        break;
+    }
+    return NoChange();
+  }
+
+ private:
+  EscapeAnalysis* escape_analysis_;
+};
+
+void EscapeAnalysisReducer::VerifyReplacement() const {
+#ifdef DEBUG
+  GraphReducer graph_reducer(zone(), jsgraph()->graph());
+  EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
+  graph_reducer.AddReducer(&verifier);
+  graph_reducer.ReduceGraph();
+#endif  // DEBUG
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 1c0da16..12487b1 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -29,6 +29,10 @@
                         EscapeAnalysis* escape_analysis, Zone* zone);
 
   Reduction Reduce(Node* node) final;
+  void SetExistsVirtualAllocate(bool exists) {
+    exists_virtual_allocate_ = exists;
+  }
+  void VerifyReplacement() const;
 
  private:
   Reduction ReduceLoad(Node* node);
@@ -38,9 +42,9 @@
   Reduction ReduceReferenceEqual(Node* node);
   Reduction ReduceObjectIsSmi(Node* node);
   Reduction ReduceFrameStateUses(Node* node);
-  Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
-  Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+  Node* ReduceDeoptState(Node* node, Node* effect, bool multiple_users);
   Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+                              bool node_multiused, bool already_cloned,
                               bool multiple_users);
 
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -51,7 +55,10 @@
   JSGraph* const jsgraph_;
   EscapeAnalysis* escape_analysis_;
   Zone* const zone_;
-  BitVector visited_;
+  // fully_reduced_ marks nodes that have already been processed (allocs,
+  // loads, stores) and nodes that need no visit from ReduceDeoptState etc.
+  BitVector fully_reduced_;
+  bool exists_virtual_allocate_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
 };
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index af0ba6a..b1a12b2 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -24,106 +24,134 @@
 namespace internal {
 namespace compiler {
 
-const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
-    std::numeric_limits<Alias>::max();
-const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
-    std::numeric_limits<Alias>::max() - 1;
+using Alias = EscapeStatusAnalysis::Alias;
 
+#ifdef DEBUG
+#define TRACE(...)                                    \
+  do {                                                \
+    if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+  } while (false)
+#else
+#define TRACE(...)
+#endif
+
+const Alias EscapeStatusAnalysis::kNotReachable =
+    std::numeric_limits<Alias>::max();
+const Alias EscapeStatusAnalysis::kUntrackable =
+    std::numeric_limits<Alias>::max() - 1;
 
 class VirtualObject : public ZoneObject {
  public:
-  enum Status { kUntracked = 0, kTracked = 1 };
-  VirtualObject(NodeId id, Zone* zone)
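+  // Status is now a bit set: an object can independently be tracked, have
+  // had all of its fields initialized, and require a copy before the next
+  // modification.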
+  enum Status {
+    kInitial = 0,
+    kTracked = 1u << 0,
+    kInitialized = 1u << 1,
+    kCopyRequired = 1u << 2,
+  };
+  typedef base::Flags<Status, unsigned char> StatusFlags;
+
+  VirtualObject(NodeId id, VirtualState* owner, Zone* zone)
       : id_(id),
-        status_(kUntracked),
+        status_(kInitial),
         fields_(zone),
         phi_(zone),
-        object_state_(nullptr) {}
+        object_state_(nullptr),
+        owner_(owner) {}
 
-  VirtualObject(const VirtualObject& other)
+  VirtualObject(VirtualState* owner, const VirtualObject& other)
       : id_(other.id_),
-        status_(other.status_),
+        status_(other.status_ & ~kCopyRequired),
         fields_(other.fields_),
         phi_(other.phi_),
-        object_state_(other.object_state_) {}
+        object_state_(other.object_state_),
+        owner_(owner) {}
 
-  VirtualObject(NodeId id, Zone* zone, size_t field_number)
+  VirtualObject(NodeId id, VirtualState* owner, Zone* zone, size_t field_number,
+                bool initialized)
       : id_(id),
-        status_(kTracked),
+        status_(kTracked | (initialized ? kInitialized : kInitial)),
         fields_(zone),
         phi_(zone),
-        object_state_(nullptr) {
+        object_state_(nullptr),
+        owner_(owner) {
     fields_.resize(field_number);
     phi_.resize(field_number, false);
   }
 
-  Node* GetField(size_t offset) {
-    if (offset < fields_.size()) {
-      return fields_[offset];
-    }
-    return nullptr;
-  }
+  Node* GetField(size_t offset) { return fields_[offset]; }
 
-  bool IsCreatedPhi(size_t offset) {
-    if (offset < phi_.size()) {
-      return phi_[offset];
-    }
-    return false;
-  }
+  bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
 
-  bool SetField(size_t offset, Node* node, bool created_phi = false) {
-    bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+  void SetField(size_t offset, Node* node, bool created_phi = false) {
     fields_[offset] = node;
     phi_[offset] = created_phi;
-    if (changed && FLAG_trace_turbo_escape && node) {
-      PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
-             node->op()->mnemonic());
-    }
-    return changed;
   }
-  bool IsVirtual() const { return status_ == kTracked; }
-  bool IsTracked() const { return status_ != kUntracked; }
+  bool IsTracked() const { return status_ & kTracked; }
+  bool IsInitialized() const { return status_ & kInitialized; }
+  bool SetInitialized() { return status_ |= kInitialized; }
+  VirtualState* owner() const { return owner_; }
 
   Node** fields_array() { return &fields_.front(); }
   size_t field_count() { return fields_.size(); }
   bool ResizeFields(size_t field_count) {
-    if (field_count != fields_.size()) {
+    if (field_count > fields_.size()) {
       fields_.resize(field_count);
       phi_.resize(field_count);
       return true;
     }
     return false;
   }
-  bool ClearAllFields() {
-    bool changed = false;
+  void ClearAllFields() {
     for (size_t i = 0; i < fields_.size(); ++i) {
-      if (fields_[i] != nullptr) {
-        fields_[i] = nullptr;
-        changed = true;
-      }
+      fields_[i] = nullptr;
       phi_[i] = false;
     }
-    return changed;
+  }
+  bool AllFieldsClear() {
+    for (size_t i = 0; i < fields_.size(); ++i) {
+      if (fields_[i] != nullptr) {
+        return false;
+      }
+    }
+    return true;
   }
   bool UpdateFrom(const VirtualObject& other);
+  bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+                 CommonOperatorBuilder* common);
   void SetObjectState(Node* node) { object_state_ = node; }
   Node* GetObjectState() const { return object_state_; }
+  bool IsCopyRequired() const { return status_ & kCopyRequired; }
+  void SetCopyRequired() { status_ |= kCopyRequired; }
+  bool NeedCopyForModification() {
+    return IsCopyRequired() && IsInitialized();
+  }
 
   NodeId id() const { return id_; }
   void id(NodeId id) { id_ = id; }
 
  private:
+  bool MergeFields(size_t i, Node* at, MergeCache* cache, Graph* graph,
+                   CommonOperatorBuilder* common);
+
   NodeId id_;
-  Status status_;
+  StatusFlags status_;
   ZoneVector<Node*> fields_;
   ZoneVector<bool> phi_;
   Node* object_state_;
+  VirtualState* owner_;
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualObject);
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
 
 bool VirtualObject::UpdateFrom(const VirtualObject& other) {
   bool changed = status_ != other.status_;
   status_ = other.status_;
+  phi_ = other.phi_;
   if (fields_.size() != other.fields_.size()) {
     fields_ = other.fields_;
     return true;
@@ -137,36 +165,49 @@
   return changed;
 }
 
-
 class VirtualState : public ZoneObject {
  public:
-  VirtualState(Zone* zone, size_t size);
-  VirtualState(const VirtualState& states);
+  VirtualState(Node* owner, Zone* zone, size_t size)
+      : info_(size, nullptr, zone), owner_(owner) {}
+
+  VirtualState(Node* owner, const VirtualState& state)
+      : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+        owner_(owner) {
+    for (size_t i = 0; i < info_.size(); ++i) {
+      if (state.info_[i]) {
+        info_[i] = state.info_[i];
+      }
+    }
+  }
 
   VirtualObject* VirtualObjectFromAlias(size_t alias);
-  VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
-                                                 NodeId id, Zone* zone);
-  void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
-  void LastChangedAt(Node* node) { last_changed_ = node; }
-  Node* GetLastChanged() { return last_changed_; }
+  void SetVirtualObject(Alias alias, VirtualObject* state);
   bool UpdateFrom(VirtualState* state, Zone* zone);
   bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
-                 CommonOperatorBuilder* common, Node* control);
+                 CommonOperatorBuilder* common, Node* at);
   size_t size() const { return info_.size(); }
+  Node* owner() const { return owner_; }
+  VirtualObject* Copy(VirtualObject* obj, Alias alias);
+  void SetCopyRequired() {
+    for (VirtualObject* obj : info_) {
+      if (obj) obj->SetCopyRequired();
+    }
+  }
 
  private:
   ZoneVector<VirtualObject*> info_;
-  Node* last_changed_;
-};
+  Node* owner_;
 
+  DISALLOW_COPY_AND_ASSIGN(VirtualState);
+};
 
 class MergeCache : public ZoneObject {
  public:
   explicit MergeCache(Zone* zone)
       : states_(zone), objects_(zone), fields_(zone) {
-    states_.reserve(4);
-    objects_.reserve(4);
-    fields_.reserve(4);
+    states_.reserve(5);
+    objects_.reserve(5);
+    fields_.reserve(5);
   }
   ZoneVector<VirtualState*>& states() { return states_; }
   ZoneVector<VirtualObject*>& objects() { return objects_; }
@@ -176,20 +217,20 @@
     objects_.clear();
     fields_.clear();
   }
-  size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
-  void LoadVirtualObjectsForFieldsFrom(
-      VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+  size_t LoadVirtualObjectsFromStatesFor(Alias alias);
+  void LoadVirtualObjectsForFieldsFrom(VirtualState* state,
+                                       const ZoneVector<Alias>& aliases);
   Node* GetFields(size_t pos);
 
  private:
   ZoneVector<VirtualState*> states_;
   ZoneVector<VirtualObject*> objects_;
   ZoneVector<Node*> fields_;
+
+  DISALLOW_COPY_AND_ASSIGN(MergeCache);
 };
 
-
-size_t MergeCache::LoadVirtualObjectsFromStatesFor(
-    EscapeAnalysis::Alias alias) {
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(Alias alias) {
   objects_.clear();
   DCHECK_GT(states_.size(), 0u);
   size_t min = std::numeric_limits<size_t>::max();
@@ -202,13 +243,12 @@
   return min;
 }
 
-
 void MergeCache::LoadVirtualObjectsForFieldsFrom(
-    VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+    VirtualState* state, const ZoneVector<Alias>& aliases) {
   objects_.clear();
   size_t max_alias = state->size();
   for (Node* field : fields_) {
-    EscapeAnalysis::Alias alias = aliases[field->id()];
+    Alias alias = aliases[field->id()];
     if (alias >= max_alias) continue;
     if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
       objects_.push_back(obj);
@@ -216,11 +256,13 @@
   }
 }
 
-
 Node* MergeCache::GetFields(size_t pos) {
   fields_.clear();
-  Node* rep = objects_.front()->GetField(pos);
+  Node* rep = pos >= objects_.front()->field_count()
+                  ? nullptr
+                  : objects_.front()->GetField(pos);
   for (VirtualObject* obj : objects_) {
+    if (pos >= obj->field_count()) continue;
     Node* field = obj->GetField(pos);
     if (field) {
       fields_.push_back(field);
@@ -232,72 +274,48 @@
   return rep;
 }
 
-
-VirtualState::VirtualState(Zone* zone, size_t size)
-    : info_(size, nullptr, zone), last_changed_(nullptr) {}
-
-
-VirtualState::VirtualState(const VirtualState& state)
-    : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
-      last_changed_(state.last_changed_) {
-  for (size_t i = 0; i < state.info_.size(); ++i) {
-    if (state.info_[i]) {
-      info_[i] =
-          new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
-    }
-  }
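+// Copy-on-write helper: duplicates a virtual object into this state only if
+// it is still owned by another state; objects this state already owns are
+// reused as-is.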
+VirtualObject* VirtualState::Copy(VirtualObject* obj, Alias alias) {
+  if (obj->owner() == this) return obj;
+  VirtualObject* new_obj =
+      new (info_.get_allocator().zone()) VirtualObject(this, *obj);
+  TRACE("At state %p, alias @%d (#%d), copying virtual object from %p to %p\n",
+        static_cast<void*>(this), alias, obj->id(), static_cast<void*>(obj),
+        static_cast<void*>(new_obj));
+  info_[alias] = new_obj;
+  return new_obj;
 }
 
-
 VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
   return info_[alias];
 }
 
-
-VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
-    EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
-  if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
-    return obj;
-  }
-  VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
-  SetVirtualObject(alias, obj);
-  return obj;
-}
-
-
-void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
-                                    VirtualObject* obj) {
+void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
   info_[alias] = obj;
 }
 
-
 bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+  if (from == this) return false;
   bool changed = false;
-  for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+  for (Alias alias = 0; alias < size(); ++alias) {
     VirtualObject* ls = VirtualObjectFromAlias(alias);
     VirtualObject* rs = from->VirtualObjectFromAlias(alias);
 
-    if (rs == nullptr) {
-      continue;
-    }
+    if (ls == rs || rs == nullptr) continue;
 
     if (ls == nullptr) {
-      ls = new (zone) VirtualObject(*rs);
+      ls = new (zone) VirtualObject(this, *rs);
       SetVirtualObject(alias, ls);
       changed = true;
       continue;
     }
 
-    if (FLAG_trace_turbo_escape) {
-      PrintF("  Updating fields of @%d\n", alias);
-    }
+    TRACE("  Updating fields of @%d\n", alias);
 
     changed = ls->UpdateFrom(*rs) || changed;
   }
   return false;
 }
 
-
 namespace {
 
 bool IsEquivalentPhi(Node* node1, Node* node2) {
@@ -316,7 +334,6 @@
   return true;
 }
 
-
 bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
   if (phi->opcode() != IrOpcode::kPhi) return false;
   if (phi->op()->ValueInputCount() != inputs.size()) {
@@ -333,186 +350,225 @@
 
 }  // namespace
 
-
-Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
-  Node* rep = GetReplacement(objs.front()->id());
-  for (VirtualObject* obj : objs) {
-    if (GetReplacement(obj->id()) != rep) {
-      return nullptr;
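+// Merges field |i| across the per-predecessor values collected in |cache|:
+// creates a Phi over those values (plus the control input of |at|), or
+// patches the value inputs of a previously created Phi in place.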
+bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
+                                Graph* graph, CommonOperatorBuilder* common) {
+  bool changed = false;
+  int value_input_count = static_cast<int>(cache->fields().size());
+  Node* rep = GetField(i);
+  if (!rep || !IsCreatedPhi(i)) {
+    Node* control = NodeProperties::GetControlInput(at);
+    cache->fields().push_back(control);
+    Node* phi = graph->NewNode(
+        common->Phi(MachineRepresentation::kTagged, value_input_count),
+        value_input_count + 1, &cache->fields().front());
+    SetField(i, phi, true);
+#ifdef DEBUG
+    if (FLAG_trace_turbo_escape) {
+      PrintF("    Creating Phi #%d as merge of", phi->id());
+      for (int i = 0; i < value_input_count; i++) {
+        PrintF(" #%d (%s)", cache->fields()[i]->id(),
+               cache->fields()[i]->op()->mnemonic());
+      }
+      PrintF("\n");
+    }
+#endif
+    changed = true;
+  } else {
+    DCHECK(rep->opcode() == IrOpcode::kPhi);
+    for (int n = 0; n < value_input_count; ++n) {
+      Node* old = NodeProperties::GetValueInput(rep, n);
+      if (old != cache->fields()[n]) {
+        changed = true;
+        NodeProperties::ReplaceValueInput(rep, cache->fields()[n], n);
+      }
     }
   }
-  return rep;
+  return changed;
 }
 
+bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+                              CommonOperatorBuilder* common) {
+  DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
+         at->opcode() == IrOpcode::kPhi);
+  bool changed = false;
+  for (size_t i = 0; i < field_count(); ++i) {
+    if (Node* field = cache->GetFields(i)) {
+      changed = changed || GetField(i) != field;
+      SetField(i, field);
+      TRACE("    Field %zu agree on rep #%d\n", i, field->id());
+    } else {
+      int arity = at->opcode() == IrOpcode::kEffectPhi
+                      ? at->op()->EffectInputCount()
+                      : at->op()->ValueInputCount();
+      if (cache->fields().size() == arity) {
+        changed = MergeFields(i, at, cache, graph, common) || changed;
+      } else {
+        if (GetField(i) != nullptr) {
+          TRACE("    Field %zu cleared\n", i);
+          changed = true;
+        }
+        SetField(i, nullptr);
+      }
+    }
+  }
+  return changed;
+}
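+
+// An alias keeps a merged virtual object only if every incoming state tracks
+// one; otherwise the alias is cleared in the merged state.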
 
 bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
-                             CommonOperatorBuilder* common, Node* control) {
+                             CommonOperatorBuilder* common, Node* at) {
   DCHECK_GT(cache->states().size(), 0u);
   bool changed = false;
-  for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
-    size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
-    if (cache->objects().size() == cache->states().size()) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF("  Merging virtual objects of @%d\n", alias);
-      }
-      VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
-          alias, cache->objects().front()->id(), zone);
-      changed = mergeObject->ResizeFields(fields) || changed;
-      for (size_t i = 0; i < fields; ++i) {
-        if (Node* field = cache->GetFields(i)) {
-          changed = mergeObject->SetField(i, field) || changed;
-          if (FLAG_trace_turbo_escape) {
-            PrintF("    Field %zu agree on rep #%d\n", i, field->id());
-          }
-        } else {
-          int value_input_count = static_cast<int>(cache->fields().size());
-          if (cache->fields().size() == cache->objects().size()) {
-            Node* rep = mergeObject->GetField(i);
-            if (!rep || !mergeObject->IsCreatedPhi(i)) {
-              cache->fields().push_back(control);
-              Node* phi = graph->NewNode(
-                  common->Phi(MachineRepresentation::kTagged,
-                              value_input_count),
-                  value_input_count + 1, &cache->fields().front());
-              mergeObject->SetField(i, phi, true);
-              if (FLAG_trace_turbo_escape) {
-                PrintF("    Creating Phi #%d as merge of", phi->id());
-                for (int i = 0; i < value_input_count; i++) {
-                  PrintF(" #%d (%s)", cache->fields()[i]->id(),
-                         cache->fields()[i]->op()->mnemonic());
-                }
-                PrintF("\n");
-              }
-              changed = true;
-            } else {
-              DCHECK(rep->opcode() == IrOpcode::kPhi);
-              for (int n = 0; n < value_input_count; ++n) {
-                if (n < rep->op()->ValueInputCount()) {
-                  Node* old = NodeProperties::GetValueInput(rep, n);
-                  if (old != cache->fields()[n]) {
-                    changed = true;
-                    NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
-                                                      n);
-                  }
-                } else {
-                  changed = true;
-                  rep->InsertInput(graph->zone(), n, cache->fields()[n]);
-                }
-              }
-              if (rep->op()->ValueInputCount() != value_input_count) {
-                if (FLAG_trace_turbo_escape) {
-                  PrintF("    Widening Phi #%d of arity %d to %d", rep->id(),
-                         rep->op()->ValueInputCount(), value_input_count);
-                }
-                NodeProperties::ChangeOp(
-                    rep, common->Phi(MachineRepresentation::kTagged,
-                                     value_input_count));
-              }
-            }
-          } else {
-            changed = mergeObject->SetField(i, nullptr) || changed;
-          }
+  for (Alias alias = 0; alias < size(); ++alias) {
+    cache->objects().clear();
+    VirtualObject* mergeObject = VirtualObjectFromAlias(alias);
+    bool copy_merge_object = false;
+    size_t fields = std::numeric_limits<size_t>::max();
+    for (VirtualState* state : cache->states()) {
+      if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+        cache->objects().push_back(obj);
+        if (mergeObject == obj) {
+          copy_merge_object = true;
         }
+        fields = std::min(obj->field_count(), fields);
       }
+    }
+    if (cache->objects().size() == cache->states().size()) {
+      if (!mergeObject) {
+        VirtualObject* obj = new (zone)
+            VirtualObject(cache->objects().front()->id(), this, zone, fields,
+                          cache->objects().front()->IsInitialized());
+        SetVirtualObject(alias, obj);
+        mergeObject = obj;
+        changed = true;
+      } else if (copy_merge_object) {
+        VirtualObject* obj = new (zone) VirtualObject(this, *mergeObject);
+        SetVirtualObject(alias, obj);
+        mergeObject = obj;
+        changed = true;
+      } else {
+        changed = mergeObject->ResizeFields(fields) || changed;
+      }
+#ifdef DEBUG
+      if (FLAG_trace_turbo_escape) {
+        PrintF("  Alias @%d, merging into %p virtual objects", alias,
+               static_cast<void*>(mergeObject));
+        for (size_t i = 0; i < cache->objects().size(); i++) {
+          PrintF(" %p", static_cast<void*>(cache->objects()[i]));
+        }
+        PrintF("\n");
+      }
+#endif  // DEBUG
+      changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
     } else {
+      if (mergeObject) {
+        TRACE("  Alias %d, virtual object removed\n", alias);
+        changed = true;
+      }
       SetVirtualObject(alias, nullptr);
     }
   }
   return changed;
 }
 
-
 EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
                                            Graph* graph, Zone* zone)
-    : object_analysis_(object_analysis),
+    : stack_(zone),
+      object_analysis_(object_analysis),
       graph_(graph),
       zone_(zone),
-      status_(graph->NodeCount(), kUnknown, zone),
-      queue_(zone) {}
-
+      status_(zone),
+      next_free_alias_(0),
+      status_stack_(zone),
+      aliases_(zone) {}
 
 EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
 
-
 bool EscapeStatusAnalysis::HasEntry(Node* node) {
   return status_[node->id()] & (kTracked | kEscaped);
 }
 
-
 bool EscapeStatusAnalysis::IsVirtual(Node* node) {
-  return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+  return IsVirtual(node->id());
 }
 
+bool EscapeStatusAnalysis::IsVirtual(NodeId id) {
+  return (status_[id] & kTracked) && !(status_[id] & kEscaped);
+}
 
 bool EscapeStatusAnalysis::IsEscaped(Node* node) {
   return status_[node->id()] & kEscaped;
 }
 
-
 bool EscapeStatusAnalysis::IsAllocation(Node* node) {
   return node->opcode() == IrOpcode::kAllocate ||
          node->opcode() == IrOpcode::kFinishRegion;
 }
 
-
 bool EscapeStatusAnalysis::SetEscaped(Node* node) {
   bool changed = !(status_[node->id()] & kEscaped);
   status_[node->id()] |= kEscaped | kTracked;
   return changed;
 }
 
-
-void EscapeStatusAnalysis::Resize() {
-  status_.resize(graph()->NodeCount(), kUnknown);
+bool EscapeStatusAnalysis::IsInQueue(NodeId id) {
+  return status_[id] & kInQueue;
 }
 
-
-size_t EscapeStatusAnalysis::size() { return status_.size(); }
-
-
-void EscapeStatusAnalysis::Run() {
-  Resize();
-  queue_.push_back(graph()->end());
-  status_[graph()->end()->id()] |= kOnStack;
-  while (!queue_.empty()) {
-    Node* node = queue_.front();
-    queue_.pop_front();
-    status_[node->id()] &= ~kOnStack;
-    Process(node);
-    status_[node->id()] |= kVisited;
-    for (Edge edge : node->input_edges()) {
-      Node* input = edge.to();
-      if (!(status_[input->id()] & (kVisited | kOnStack))) {
-        queue_.push_back(input);
-        status_[input->id()] |= kOnStack;
-      }
-    }
+void EscapeStatusAnalysis::SetInQueue(NodeId id, bool on_stack) {
+  if (on_stack) {
+    status_[id] |= kInQueue;
+  } else {
+    status_[id] &= ~kInQueue;
   }
 }
 
+void EscapeStatusAnalysis::ResizeStatusVector() {
+  if (status_.size() <= graph()->NodeCount()) {
+    status_.resize(graph()->NodeCount() * 1.1, kUnknown);
+  }
+}
+
+size_t EscapeStatusAnalysis::GetStatusVectorSize() { return status_.size(); }
+
+void EscapeStatusAnalysis::RunStatusAnalysis() {
+  ResizeStatusVector();
+  while (!status_stack_.empty()) {
+    Node* node = status_stack_.back();
+    status_stack_.pop_back();
+    status_[node->id()] &= ~kOnStack;
+    Process(node);
+    status_[node->id()] |= kVisited;
+  }
+}
+
+void EscapeStatusAnalysis::EnqueueForStatusAnalysis(Node* node) {
+  DCHECK_NOT_NULL(node);
+  if (!(status_[node->id()] & kOnStack)) {
+    status_stack_.push_back(node);
+    status_[node->id()] |= kOnStack;
+  }
+}
 
 void EscapeStatusAnalysis::RevisitInputs(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
     if (!(status_[input->id()] & kOnStack)) {
-      queue_.push_back(input);
+      status_stack_.push_back(input);
       status_[input->id()] |= kOnStack;
     }
   }
 }
 
-
 void EscapeStatusAnalysis::RevisitUses(Node* node) {
   for (Edge edge : node->use_edges()) {
     Node* use = edge.from();
-    if (!(status_[use->id()] & kOnStack)) {
-      queue_.push_back(use);
+    if (!(status_[use->id()] & kOnStack) && !IsNotReachable(use)) {
+      status_stack_.push_back(use);
       status_[use->id()] |= kOnStack;
     }
   }
 }
 
-
 void EscapeStatusAnalysis::Process(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kAllocate:
@@ -535,15 +591,17 @@
           RevisitUses(rep);
         }
       }
+      RevisitUses(node);
       break;
     }
     case IrOpcode::kPhi:
       if (!HasEntry(node)) {
         status_[node->id()] |= kTracked;
-        if (!IsAllocationPhi(node)) {
-          SetEscaped(node);
-          RevisitUses(node);
-        }
+        RevisitUses(node);
+      }
+      if (!IsAllocationPhi(node) && SetEscaped(node)) {
+        RevisitInputs(node);
+        RevisitUses(node);
       }
       CheckUsesForEscape(node);
     default:
@@ -551,7 +609,6 @@
   }
 }
 
-
 bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
@@ -562,7 +619,6 @@
   return true;
 }
 
-
 void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
   Node* to = NodeProperties::GetValueInput(node, 0);
@@ -570,14 +626,11 @@
   if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
     RevisitUses(val);
     RevisitInputs(val);
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
-             val->id(), val->op()->mnemonic(), to->id());
-    }
+    TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+          val->id(), val->op()->mnemonic(), to->id());
   }
 }
 
-
 void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
   Node* to = NodeProperties::GetValueInput(node, 0);
@@ -585,34 +638,27 @@
   if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
     RevisitUses(val);
     RevisitInputs(val);
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
-             val->id(), val->op()->mnemonic(), to->id());
-    }
+    TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+          val->id(), val->op()->mnemonic(), to->id());
   }
 }
 
-
 void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
   if (!HasEntry(node)) {
     status_[node->id()] |= kTracked;
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Created status entry for node #%d (%s)\n", node->id(),
-             node->op()->mnemonic());
-    }
+    TRACE("Created status entry for node #%d (%s)\n", node->id(),
+          node->op()->mnemonic());
     NumberMatcher size(node->InputAt(0));
     DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
            node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+    RevisitUses(node);
     if (!size.HasValue() && SetEscaped(node)) {
-      RevisitUses(node);
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Setting #%d to escaped because of non-const alloc\n",
-               node->id());
-      }
-      // This node is known to escape, uses do not have to be checked.
+      TRACE("Setting #%d to escaped because of non-const alloc\n", node->id());
+      // This node is already known to escape, uses do not have to be checked
+      // for escape.
       return;
     }
   }
@@ -621,24 +667,22 @@
   }
 }
 
-
 bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
                                               bool phi_escaping) {
   for (Edge edge : uses->use_edges()) {
     Node* use = edge.from();
+    if (IsNotReachable(use)) continue;
     if (edge.index() >= use->op()->ValueInputCount() +
                             OperatorProperties::GetContextInputCount(use->op()))
       continue;
     switch (use->opcode()) {
       case IrOpcode::kPhi:
         if (phi_escaping && SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF(
-                "Setting #%d (%s) to escaped because of use by phi node "
-                "#%d (%s)\n",
-                rep->id(), rep->op()->mnemonic(), use->id(),
-                use->op()->mnemonic());
-          }
+          TRACE(
+              "Setting #%d (%s) to escaped because of use by phi node "
+              "#%d (%s)\n",
+              rep->id(), rep->op()->mnemonic(), use->id(),
+              use->op()->mnemonic());
           return true;
         }
       // Fallthrough.
@@ -651,37 +695,41 @@
       case IrOpcode::kReferenceEqual:
       case IrOpcode::kFinishRegion:
         if (IsEscaped(use) && SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF(
-                "Setting #%d (%s) to escaped because of use by escaping node "
-                "#%d (%s)\n",
-                rep->id(), rep->op()->mnemonic(), use->id(),
-                use->op()->mnemonic());
-          }
+          TRACE(
+              "Setting #%d (%s) to escaped because of use by escaping node "
+              "#%d (%s)\n",
+              rep->id(), rep->op()->mnemonic(), use->id(),
+              use->op()->mnemonic());
           return true;
         }
         break;
       case IrOpcode::kObjectIsSmi:
         if (!IsAllocation(rep) && SetEscaped(rep)) {
-          PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
-                 rep->id(), rep->op()->mnemonic(), use->id(),
-                 use->op()->mnemonic());
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
+          return true;
+        }
+        break;
+      case IrOpcode::kSelect:
+        if (SetEscaped(rep)) {
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
           return true;
         }
         break;
       default:
         if (use->op()->EffectInputCount() == 0 &&
             uses->op()->EffectInputCount() > 0) {
-          PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
-                 use->op()->mnemonic());
+          TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
+                use->op()->mnemonic());
           UNREACHABLE();
         }
         if (SetEscaped(rep)) {
-          if (FLAG_trace_turbo_escape) {
-            PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
-                   rep->id(), rep->op()->mnemonic(), use->id(),
-                   use->op()->mnemonic());
-          }
+          TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
           return true;
         }
     }
@@ -689,7 +737,6 @@
   return false;
 }
 
-
 void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   if (!HasEntry(node)) {
@@ -701,7 +748,6 @@
   }
 }
 
-
 void EscapeStatusAnalysis::DebugPrint() {
   for (NodeId id = 0; id < status_.size(); id++) {
     if (status_[id] & kTracked) {
@@ -711,58 +757,69 @@
   }
 }
 
-
 EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
                                Zone* zone)
-    : graph_(graph),
+    : status_analysis_(this, graph, zone),
       common_(common),
-      zone_(zone),
       virtual_states_(zone),
       replacements_(zone),
-      escape_status_(this, graph, zone),
-      cache_(new (zone) MergeCache(zone)),
-      aliases_(zone),
-      next_free_alias_(0) {}
-
+      cache_(nullptr) {}
 
 EscapeAnalysis::~EscapeAnalysis() {}
 
-
 void EscapeAnalysis::Run() {
   replacements_.resize(graph()->NodeCount());
-  AssignAliases();
-  RunObjectAnalysis();
-  escape_status_.Run();
+  status_analysis_.AssignAliases();
+  if (status_analysis_.AliasCount() > 0) {
+    cache_ = new (zone()) MergeCache(zone());
+    replacements_.resize(graph()->NodeCount());
+    status_analysis_.ResizeStatusVector();
+    RunObjectAnalysis();
+    status_analysis_.RunStatusAnalysis();
+  }
 }
 
-
-void EscapeAnalysis::AssignAliases() {
-  ZoneVector<Node*> stack(zone());
-  stack.push_back(graph()->end());
+void EscapeStatusAnalysis::AssignAliases() {
+  size_t max_size = 1024;
+  size_t min_size = 32;
+  size_t stack_size =
+      std::min(std::max(graph()->NodeCount() / 5, min_size), max_size);
+  stack_.reserve(stack_size);
+  ResizeStatusVector();
+  stack_.push_back(graph()->end());
   CHECK_LT(graph()->NodeCount(), kUntrackable);
   aliases_.resize(graph()->NodeCount(), kNotReachable);
   aliases_[graph()->end()->id()] = kUntrackable;
-  while (!stack.empty()) {
-    Node* node = stack.back();
-    stack.pop_back();
+  status_stack_.reserve(8);
+  TRACE("Discovering trackable nodes");
+  while (!stack_.empty()) {
+    Node* node = stack_.back();
+    stack_.pop_back();
     switch (node->opcode()) {
       case IrOpcode::kAllocate:
         if (aliases_[node->id()] >= kUntrackable) {
           aliases_[node->id()] = NextAlias();
+          TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+                node->id());
+          EnqueueForStatusAnalysis(node);
         }
         break;
       case IrOpcode::kFinishRegion: {
         Node* allocate = NodeProperties::GetValueInput(node, 0);
+        DCHECK_NOT_NULL(allocate);
         if (allocate->opcode() == IrOpcode::kAllocate) {
           if (aliases_[allocate->id()] >= kUntrackable) {
             if (aliases_[allocate->id()] == kNotReachable) {
-              stack.push_back(allocate);
+              stack_.push_back(allocate);
             }
             aliases_[allocate->id()] = NextAlias();
+            TRACE(" @%d:%s#%u", aliases_[allocate->id()],
+                  allocate->op()->mnemonic(), allocate->id());
+            EnqueueForStatusAnalysis(allocate);
           }
           aliases_[node->id()] = aliases_[allocate->id()];
-        } else {
-          aliases_[node->id()] = NextAlias();
+          TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+                node->id());
         }
         break;
       }
@@ -773,81 +830,119 @@
     for (Edge edge : node->input_edges()) {
       Node* input = edge.to();
       if (aliases_[input->id()] == kNotReachable) {
-        stack.push_back(input);
+        stack_.push_back(input);
         aliases_[input->id()] = kUntrackable;
       }
     }
   }
-
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Discovered trackable nodes");
-    for (EscapeAnalysis::Alias id = 0; id < graph()->NodeCount(); ++id) {
-      if (aliases_[id] < kUntrackable) {
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" #%u", id);
-        }
-      }
-    }
-    PrintF("\n");
-  }
+  TRACE("\n");
 }
 
+bool EscapeStatusAnalysis::IsNotReachable(Node* node) {
+  if (node->id() >= aliases_.size()) {
+    return false;
+  }
+  return aliases_[node->id()] == kNotReachable;
+}
 
 void EscapeAnalysis::RunObjectAnalysis() {
   virtual_states_.resize(graph()->NodeCount());
-  ZoneVector<Node*> stack(zone());
-  stack.push_back(graph()->start());
-  while (!stack.empty()) {
-    Node* node = stack.back();
-    stack.pop_back();
-    if (aliases_[node->id()] != kNotReachable && Process(node)) {
+  ZoneDeque<Node*> queue(zone());
+  queue.push_back(graph()->start());
+  ZoneVector<Node*> danglers(zone());
+  while (!queue.empty()) {
+    Node* node = queue.back();
+    queue.pop_back();
+    status_analysis_.SetInQueue(node->id(), false);
+    if (Process(node)) {
       for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        if (IsNotReachable(use)) {
+          continue;
+        }
         if (NodeProperties::IsEffectEdge(edge)) {
-          Node* use = edge.from();
-          if ((use->opcode() != IrOpcode::kLoadField &&
-               use->opcode() != IrOpcode::kLoadElement) ||
-              !IsDanglingEffectNode(use)) {
-            stack.push_back(use);
+          // Iteration order: depth first, but delay phis.
+          // We need DFS to avoid some duplication of VirtualStates and
+          // VirtualObjects, and we want to delay phis to improve performance.
+          if (use->opcode() == IrOpcode::kEffectPhi) {
+            if (!status_analysis_.IsInQueue(use->id())) {
+              queue.push_front(use);
+            }
+          } else if ((use->opcode() != IrOpcode::kLoadField &&
+                      use->opcode() != IrOpcode::kLoadElement) ||
+                     !IsDanglingEffectNode(use)) {
+            if (!status_analysis_.IsInQueue(use->id())) {
+              status_analysis_.SetInQueue(use->id(), true);
+              queue.push_back(use);
+            }
+          } else {
+            danglers.push_back(use);
           }
         }
       }
-      // First process loads: dangling loads are a problem otherwise.
-      for (Edge edge : node->use_edges()) {
-        if (NodeProperties::IsEffectEdge(edge)) {
-          Node* use = edge.from();
-          if ((use->opcode() == IrOpcode::kLoadField ||
-               use->opcode() == IrOpcode::kLoadElement) &&
-              IsDanglingEffectNode(use)) {
-            stack.push_back(use);
-          }
-        }
-      }
+      // Danglers need to be processed immediately, even if they are
+      // already in the queue. Since they do not have effect outputs,
+      // we don't have to track whether they are queued.
+      queue.insert(queue.end(), danglers.begin(), danglers.end());
+      danglers.clear();
     }
   }
+#ifdef DEBUG
   if (FLAG_trace_turbo_escape) {
     DebugPrint();
   }
+#endif
 }
 
-
-bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
-  if (node->op()->EffectInputCount() == 0) return false;
-  if (node->op()->EffectOutputCount() == 0) return false;
-  if (node->op()->EffectInputCount() == 1 &&
-      NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+bool EscapeStatusAnalysis::IsDanglingEffectNode(Node* node) {
+  if (status_[node->id()] & kDanglingComputed) {
+    return status_[node->id()] & kDangling;
+  }
+  if (node->op()->EffectInputCount() == 0 ||
+      node->op()->EffectOutputCount() == 0 ||
+      (node->op()->EffectInputCount() == 1 &&
+       NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart)) {
     // The start node is used as sentinel for nodes that are in general
     // effectful, but of which an analysis has determined that they do not
     // produce effects in this instance. We don't consider these nodes dangling.
+    status_[node->id()] |= kDanglingComputed;
     return false;
   }
   for (Edge edge : node->use_edges()) {
+    Node* use = edge.from();
+    if (aliases_[use->id()] == kNotReachable) continue;
     if (NodeProperties::IsEffectEdge(edge)) {
+      status_[node->id()] |= kDanglingComputed;
       return false;
     }
   }
+  status_[node->id()] |= kDanglingComputed | kDangling;
   return true;
 }
 
+bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
+  if (status_[node->id()] & kBranchPointComputed) {
+    return status_[node->id()] & kBranchPoint;
+  }
+  int count = 0;
+  for (Edge edge : node->use_edges()) {
+    Node* use = edge.from();
+    if (aliases_[use->id()] == kNotReachable) continue;
+    if (NodeProperties::IsEffectEdge(edge)) {
+      if ((use->opcode() == IrOpcode::kLoadField ||
+           use->opcode() == IrOpcode::kLoadElement ||
+           use->opcode() == IrOpcode::kLoad) &&
+          IsDanglingEffectNode(use))
+        continue;
+      if (++count > 1) {
+        status_[node->id()] |= kBranchPointComputed | kBranchPoint;
+        return true;
+      }
+    }
+  }
+  status_[node->id()] |= kBranchPointComputed;
+  return false;
+}
 
 bool EscapeAnalysis::Process(Node* node) {
   switch (node->opcode()) {
@@ -888,12 +983,12 @@
   return true;
 }
 
-
 void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
   for (Edge edge : node->input_edges()) {
     Node* input = edge.to();
-    if (!NodeProperties::IsValueEdge(edge) &&
-        !NodeProperties::IsContextEdge(edge))
+    Node* use = edge.from();
+    if (edge.index() >= use->op()->ValueInputCount() +
+                            OperatorProperties::GetContextInputCount(use->op()))
       continue;
     switch (node->opcode()) {
       case IrOpcode::kStoreField:
@@ -904,13 +999,17 @@
       case IrOpcode::kStateValues:
       case IrOpcode::kReferenceEqual:
       case IrOpcode::kFinishRegion:
-      case IrOpcode::kPhi:
+      case IrOpcode::kObjectIsSmi:
         break;
       default:
         VirtualState* state = virtual_states_[node->id()];
-        if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
-          if (obj->ClearAllFields()) {
-            state->LastChangedAt(node);
+        if (VirtualObject* obj =
+                GetVirtualObject(state, ResolveReplacement(input))) {
+          if (!obj->AllFieldsClear()) {
+            obj = CopyForModificationAt(obj, state, node);
+            obj->ClearAllFields();
+            TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+                  obj->id());
           }
         }
         break;
@@ -918,22 +1017,32 @@
   }
 }
 
-
-bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
-  int count = 0;
-  for (Edge edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      if (++count > 1) {
-        return true;
-      }
-    }
+VirtualState* EscapeAnalysis::CopyForModificationAt(VirtualState* state,
+                                                    Node* node) {
+  if (state->owner() != node) {
+    VirtualState* new_state = new (zone()) VirtualState(node, *state);
+    virtual_states_[node->id()] = new_state;
+    TRACE("Copying virtual state %p to new state %p at node %s#%d\n",
+          static_cast<void*>(state), static_cast<void*>(new_state),
+          node->op()->mnemonic(), node->id());
+    return new_state;
   }
-  return false;
+  return state;
 }
 
+VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
+                                                     VirtualState* state,
+                                                     Node* node) {
+  if (obj->NeedCopyForModification()) {
+    state = CopyForModificationAt(state, node);
+    return state->Copy(obj, GetAlias(obj->id()));
+  }
+  return obj;
+}
 
 void EscapeAnalysis::ForwardVirtualState(Node* node) {
   DCHECK_EQ(node->op()->EffectInputCount(), 1);
+#ifdef DEBUG
   if (node->opcode() != IrOpcode::kLoadField &&
       node->opcode() != IrOpcode::kLoadElement &&
       node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
@@ -941,189 +1050,154 @@
            node->op()->mnemonic());
     UNREACHABLE();
   }
+#endif  // DEBUG
   Node* effect = NodeProperties::GetEffectInput(node);
-  // Break the cycle for effect phis.
-  if (effect->opcode() == IrOpcode::kEffectPhi) {
-    if (virtual_states_[effect->id()] == nullptr) {
-      virtual_states_[effect->id()] =
-          new (zone()) VirtualState(zone(), AliasCount());
-    }
-  }
   DCHECK_NOT_NULL(virtual_states_[effect->id()]);
-  if (IsEffectBranchPoint(effect)) {
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
-             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
-             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
-    }
-    if (!virtual_states_[node->id()]) {
-      virtual_states_[node->id()] =
-          new (zone()) VirtualState(*virtual_states_[effect->id()]);
-    } else {
-      virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
-                                              zone());
-    }
+  if (virtual_states_[node->id()]) {
+    virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+                                            zone());
   } else {
     virtual_states_[node->id()] = virtual_states_[effect->id()];
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
-             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
-             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+    TRACE("Forwarding object state %p from %s#%d to %s#%d",
+          static_cast<void*>(virtual_states_[effect->id()]),
+          effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
+          node->id());
+    if (IsEffectBranchPoint(effect) ||
+        OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+      virtual_states_[node->id()]->SetCopyRequired();
+      TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
+            effect->id());
     }
+    TRACE("\n");
   }
 }
 
-
 void EscapeAnalysis::ProcessStart(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStart);
-  virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+  virtual_states_[node->id()] =
+      new (zone()) VirtualState(node, zone(), AliasCount());
 }
 
-
 bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
   bool changed = false;
 
   VirtualState* mergeState = virtual_states_[node->id()];
   if (!mergeState) {
-    mergeState = new (zone()) VirtualState(zone(), AliasCount());
+    mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
     virtual_states_[node->id()] = mergeState;
     changed = true;
-    if (FLAG_trace_turbo_escape) {
-      PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
-             static_cast<void*>(mergeState));
-    }
-  } else if (mergeState->GetLastChanged() != node) {
-    changed = true;
+    TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
+          static_cast<void*>(mergeState));
   }
 
   cache_->Clear();
 
-  if (FLAG_trace_turbo_escape) {
-    PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
-           static_cast<void*>(mergeState));
-  }
+  TRACE("At Effect Phi #%d, merging states into %p:", node->id(),
+        static_cast<void*>(mergeState));
 
   for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
     Node* input = NodeProperties::GetEffectInput(node, i);
     VirtualState* state = virtual_states_[input->id()];
     if (state) {
       cache_->states().push_back(state);
+      if (state == mergeState) {
+        mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+        virtual_states_[node->id()] = mergeState;
+        changed = true;
+      }
     }
-    if (FLAG_trace_turbo_escape) {
-      PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
-             input->op()->mnemonic());
-    }
+    TRACE(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+          input->op()->mnemonic());
   }
-  if (FLAG_trace_turbo_escape) {
-    PrintF("\n");
-  }
+  TRACE("\n");
 
   if (cache_->states().size() == 0) {
     return changed;
   }
 
-  changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
-                                  NodeProperties::GetControlInput(node)) ||
-            changed;
+  changed =
+      mergeState->MergeFrom(cache_, zone(), graph(), common(), node) || changed;
 
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
-  }
+  TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
 
   if (changed) {
-    mergeState->LastChangedAt(node);
-    escape_status_.Resize();
+    status_analysis_.ResizeStatusVector();
   }
   return changed;
 }
 
-
 void EscapeAnalysis::ProcessAllocation(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
   ForwardVirtualState(node);
+  VirtualState* state = virtual_states_[node->id()];
+  Alias alias = GetAlias(node->id());
 
   // Check if we have already processed this node.
-  if (virtual_states_[node->id()]->VirtualObjectFromAlias(
-          aliases_[node->id()])) {
+  if (state->VirtualObjectFromAlias(alias)) {
     return;
   }
 
+  if (state->owner()->opcode() == IrOpcode::kEffectPhi) {
+    state = CopyForModificationAt(state, node);
+  }
+
   NumberMatcher size(node->InputAt(0));
   DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
          node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
   if (size.HasValue()) {
-    virtual_states_[node->id()]->SetVirtualObject(
-        aliases_[node->id()],
-        new (zone())
-            VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+    VirtualObject* obj = new (zone()) VirtualObject(
+        node->id(), state, zone(), size.Value() / kPointerSize, false);
+    state->SetVirtualObject(alias, obj);
   } else {
-    virtual_states_[node->id()]->SetVirtualObject(
-        aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+    state->SetVirtualObject(
+        alias, new (zone()) VirtualObject(node->id(), state, zone()));
   }
-  virtual_states_[node->id()]->LastChangedAt(node);
 }
 
-
 void EscapeAnalysis::ProcessFinishRegion(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
   ForwardVirtualState(node);
   Node* allocation = NodeProperties::GetValueInput(node, 0);
   if (allocation->opcode() == IrOpcode::kAllocate) {
     VirtualState* state = virtual_states_[node->id()];
-    if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
-      VirtualObject* vobj_alloc =
-          state->VirtualObjectFromAlias(aliases_[allocation->id()]);
-      DCHECK_NOT_NULL(vobj_alloc);
-      state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
-      if (FLAG_trace_turbo_escape) {
-        PrintF("Linked finish region node #%d to node #%d\n", node->id(),
-               allocation->id());
-      }
-      state->LastChangedAt(node);
-    }
+    VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+    DCHECK_NOT_NULL(obj);
+    obj->SetInitialized();
   }
 }
 
-
 Node* EscapeAnalysis::replacement(NodeId id) {
   if (id >= replacements_.size()) return nullptr;
   return replacements_[id];
 }
 
-
 Node* EscapeAnalysis::replacement(Node* node) {
   return replacement(node->id());
 }
 
-
 bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
   bool changed = replacements_[node->id()] != rep;
   replacements_[node->id()] = rep;
   return changed;
 }
 
-
 bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
                                        Node* rep) {
   if (SetReplacement(node, rep)) {
-    state->LastChangedAt(node);
-    if (FLAG_trace_turbo_escape) {
-      if (rep) {
-        PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
-               rep->op()->mnemonic());
-      } else {
-        PrintF("Replacement of #%d cleared\n", node->id());
-      }
+    if (rep) {
+      TRACE("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+            rep->op()->mnemonic());
+    } else {
+      TRACE("Replacement of #%d cleared\n", node->id());
     }
     return true;
   }
   return false;
 }
 
-
 Node* EscapeAnalysis::ResolveReplacement(Node* node) {
   while (replacement(node)) {
     node = replacement(node);
@@ -1131,12 +1205,10 @@
   return node;
 }
 
-
 Node* EscapeAnalysis::GetReplacement(Node* node) {
   return GetReplacement(node->id());
 }
 
-
 Node* EscapeAnalysis::GetReplacement(NodeId id) {
   Node* node = nullptr;
   while (replacement(id)) {
@@ -1146,50 +1218,31 @@
   return node;
 }
 
-
 bool EscapeAnalysis::IsVirtual(Node* node) {
-  if (node->id() >= escape_status_.size()) {
+  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
     return false;
   }
-  return escape_status_.IsVirtual(node);
+  return status_analysis_.IsVirtual(node);
 }
 
-
 bool EscapeAnalysis::IsEscaped(Node* node) {
-  if (node->id() >= escape_status_.size()) {
+  if (node->id() >= status_analysis_.GetStatusVectorSize()) {
     return false;
   }
-  return escape_status_.IsEscaped(node);
+  return status_analysis_.IsEscaped(node);
 }
 
-
 bool EscapeAnalysis::SetEscaped(Node* node) {
-  return escape_status_.SetEscaped(node);
+  return status_analysis_.SetEscaped(node);
 }
 
-
 VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
   if (VirtualState* states = virtual_states_[at->id()]) {
-    return states->VirtualObjectFromAlias(aliases_[id]);
+    return states->VirtualObjectFromAlias(GetAlias(id));
   }
   return nullptr;
 }
 
-
-VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
-                                                    Node* node) {
-  VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
-  while (obj && replacement(obj->id())) {
-    if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
-      obj = next;
-    } else {
-      break;
-    }
-  }
-  return obj;
-}
-
-
 bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
   DCHECK(IsVirtual(left) && IsVirtual(right));
   left = ResolveReplacement(left);
@@ -1200,83 +1253,78 @@
   return false;
 }
 
-
 int EscapeAnalysis::OffsetFromAccess(Node* node) {
   DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
   return OpParameter<FieldAccess>(node).offset / kPointerSize;
 }
 
-
-void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
                                         VirtualState* state) {
-  if (FLAG_trace_turbo_escape) {
-    PrintF("Load #%d from phi #%d", node->id(), from->id());
-  }
+  TRACE("Load #%d from phi #%d", load->id(), from->id());
 
   cache_->fields().clear();
-  for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
-    Node* input = NodeProperties::GetValueInput(node, i);
+  for (int i = 0; i < load->op()->ValueInputCount(); ++i) {
+    Node* input = NodeProperties::GetValueInput(load, i);
     cache_->fields().push_back(input);
   }
 
-  cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+  cache_->LoadVirtualObjectsForFieldsFrom(state,
+                                          status_analysis_.GetAliasMap());
   if (cache_->objects().size() == cache_->fields().size()) {
     cache_->GetFields(offset);
     if (cache_->fields().size() == cache_->objects().size()) {
-      Node* rep = replacement(node);
+      Node* rep = replacement(load);
       if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
         int value_input_count = static_cast<int>(cache_->fields().size());
         cache_->fields().push_back(NodeProperties::GetControlInput(from));
         Node* phi = graph()->NewNode(
             common()->Phi(MachineRepresentation::kTagged, value_input_count),
             value_input_count + 1, &cache_->fields().front());
-        escape_status_.Resize();
-        SetReplacement(node, phi);
-        state->LastChangedAt(node);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(" got phi created.\n");
-        }
-      } else if (FLAG_trace_turbo_escape) {
-        PrintF(" has already phi #%d.\n", rep->id());
+        status_analysis_.ResizeStatusVector();
+        SetReplacement(load, phi);
+        TRACE(" got phi created.\n");
+      } else {
+        TRACE(" has already phi #%d.\n", rep->id());
       }
-    } else if (FLAG_trace_turbo_escape) {
-      PrintF(" has incomplete field info.\n");
+    } else {
+      TRACE(" has incomplete field info.\n");
     }
-  } else if (FLAG_trace_turbo_escape) {
-    PrintF(" has incomplete virtual object info.\n");
+  } else {
+    TRACE(" has incomplete virtual object info.\n");
   }
 }
 
-
 void EscapeAnalysis::ProcessLoadField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
   ForwardVirtualState(node);
-  Node* from = NodeProperties::GetValueInput(node, 0);
+  Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
-  if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+  if (VirtualObject* object = GetVirtualObject(state, from)) {
     int offset = OffsetFromAccess(node);
-    if (!object->IsTracked()) return;
+    if (!object->IsTracked() ||
+        static_cast<size_t>(offset) >= object->field_count()) {
+      return;
+    }
     Node* value = object->GetField(offset);
     if (value) {
       value = ResolveReplacement(value);
     }
     // Record that the load has this alias.
     UpdateReplacement(state, node, value);
+  } else if (from->opcode() == IrOpcode::kPhi &&
+             OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+    int offset = OffsetFromAccess(node);
+    // Only binary phis are supported for now.
+    ProcessLoadFromPhi(offset, from, node, state);
   } else {
-    if (from->opcode() == IrOpcode::kPhi &&
-        OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
-      int offset = OffsetFromAccess(node);
-      // Only binary phis are supported for now.
-      ProcessLoadFromPhi(offset, from, node, state);
-    }
+    UpdateReplacement(state, node, nullptr);
   }
 }
 
-
 void EscapeAnalysis::ProcessLoadElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
   ForwardVirtualState(node);
-  Node* from = NodeProperties::GetValueInput(node, 0);
+  Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
   Node* index_node = node->InputAt(1);
   NumberMatcher index(index_node);
@@ -1287,12 +1335,16 @@
   ElementAccess access = OpParameter<ElementAccess>(node);
   if (index.HasValue()) {
     int offset = index.Value() + access.header_size / kPointerSize;
-    if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+    if (VirtualObject* object = GetVirtualObject(state, from)) {
       CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
                kPointerSizeLog2);
       CHECK_EQ(access.header_size % kPointerSize, 0);
 
-      if (!object->IsTracked()) return;
+      if (!object->IsTracked() ||
+          static_cast<size_t>(offset) >= object->field_count()) {
+        return;
+      }
+
       Node* value = object->GetField(offset);
       if (value) {
         value = ResolveReplacement(value);
@@ -1303,43 +1355,42 @@
       ElementAccess access = OpParameter<ElementAccess>(node);
       int offset = index.Value() + access.header_size / kPointerSize;
       ProcessLoadFromPhi(offset, from, node, state);
+    } else {
+      UpdateReplacement(state, node, nullptr);
     }
   } else {
     // We have a load from a non-const index, cannot eliminate object.
     if (SetEscaped(from)) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF(
-            "Setting #%d (%s) to escaped because store element #%d to "
-            "non-const "
-            "index #%d (%s)\n",
-            from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
-            index_node->op()->mnemonic());
-      }
+      TRACE(
+          "Setting #%d (%s) to escaped because load element #%d from non-const "
+          "index #%d (%s)\n",
+          from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+          index_node->op()->mnemonic());
     }
   }
 }
 
-
 void EscapeAnalysis::ProcessStoreField(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
   ForwardVirtualState(node);
-  Node* to = NodeProperties::GetValueInput(node, 0);
-  Node* val = NodeProperties::GetValueInput(node, 1);
+  Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   VirtualState* state = virtual_states_[node->id()];
-  if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
-    if (!obj->IsTracked()) return;
-    int offset = OffsetFromAccess(node);
-    if (obj->SetField(offset, ResolveReplacement(val))) {
-      state->LastChangedAt(node);
+  VirtualObject* obj = GetVirtualObject(state, to);
+  int offset = OffsetFromAccess(node);
+  if (obj && obj->IsTracked() &&
+      static_cast<size_t>(offset) < obj->field_count()) {
+    Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
+    if (obj->GetField(offset) != val) {
+      obj = CopyForModificationAt(obj, state, node);
+      obj->SetField(offset, val);
     }
   }
 }
 
-
 void EscapeAnalysis::ProcessStoreElement(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
   ForwardVirtualState(node);
-  Node* to = NodeProperties::GetValueInput(node, 0);
+  Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
   Node* index_node = node->InputAt(1);
   NumberMatcher index(index_node);
   DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
@@ -1347,41 +1398,47 @@
          index_node->opcode() != IrOpcode::kFloat32Constant &&
          index_node->opcode() != IrOpcode::kFloat64Constant);
   ElementAccess access = OpParameter<ElementAccess>(node);
-  Node* val = NodeProperties::GetValueInput(node, 2);
+  VirtualState* state = virtual_states_[node->id()];
+  VirtualObject* obj = GetVirtualObject(state, to);
   if (index.HasValue()) {
     int offset = index.Value() + access.header_size / kPointerSize;
-    VirtualState* states = virtual_states_[node->id()];
-    if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
-      if (!obj->IsTracked()) return;
+    if (obj && obj->IsTracked() &&
+        static_cast<size_t>(offset) < obj->field_count()) {
       CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
                kPointerSizeLog2);
       CHECK_EQ(access.header_size % kPointerSize, 0);
-      if (obj->SetField(offset, ResolveReplacement(val))) {
-        states->LastChangedAt(node);
+      Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
+      if (obj->GetField(offset) != val) {
+        obj = CopyForModificationAt(obj, state, node);
+        obj->SetField(offset, val);
       }
     }
   } else {
     // We have a store to a non-const index, cannot eliminate object.
     if (SetEscaped(to)) {
-      if (FLAG_trace_turbo_escape) {
-        PrintF(
-            "Setting #%d (%s) to escaped because store element #%d to "
-            "non-const "
-            "index #%d (%s)\n",
-            to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
-            index_node->op()->mnemonic());
+      TRACE(
+          "Setting #%d (%s) to escaped because store element #%d to non-const "
+          "index #%d (%s)\n",
+          to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+          index_node->op()->mnemonic());
+    }
+    if (obj && obj->IsTracked()) {
+      if (!obj->AllFieldsClear()) {
+        obj = CopyForModificationAt(obj, state, node);
+        obj->ClearAllFields();
+        TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+              obj->id());
       }
     }
   }
 }
 
-
 Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
   if ((node->opcode() == IrOpcode::kFinishRegion ||
        node->opcode() == IrOpcode::kAllocate) &&
       IsVirtual(node)) {
-    if (VirtualObject* vobj =
-            ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+    if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
+                                               ResolveReplacement(node))) {
       if (Node* object_state = vobj->GetObjectState()) {
         return object_state;
       } else {
@@ -1396,13 +1453,11 @@
             graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
                              input_count, &cache_->fields().front());
         vobj->SetObjectState(new_object_state);
-        if (FLAG_trace_turbo_escape) {
-          PrintF(
-              "Creating object state #%d for vobj %p (from node #%d) at effect "
-              "#%d\n",
-              new_object_state->id(), static_cast<void*>(vobj), node->id(),
-              effect->id());
-        }
+        TRACE(
+            "Creating object state #%d for vobj %p (from node #%d) at effect "
+            "#%d\n",
+            new_object_state->id(), static_cast<void*>(vobj), node->id(),
+            effect->id());
         // Now fix uses of other objects.
         for (size_t i = 0; i < vobj->field_count(); ++i) {
           if (Node* field = vobj->GetField(i)) {
@@ -1420,7 +1475,6 @@
   return nullptr;
 }
 
-
 void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
   PrintF("  Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
          object->field_count());
@@ -1431,9 +1485,8 @@
   }
 }
 
-
 void EscapeAnalysis::DebugPrintState(VirtualState* state) {
-  PrintF("Dumping object state %p\n", static_cast<void*>(state));
+  PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
   for (Alias alias = 0; alias < AliasCount(); ++alias) {
     if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
       DebugPrintObject(object, alias);
@@ -1441,7 +1494,6 @@
   }
 }
 
-
 void EscapeAnalysis::DebugPrint() {
   ZoneVector<VirtualState*> object_states(zone());
   for (NodeId id = 0; id < virtual_states_.size(); id++) {
@@ -1457,15 +1509,26 @@
   }
 }
 
-
 VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
                                                 Node* node) {
-  if (node->id() >= aliases_.size()) return nullptr;
-  Alias alias = aliases_[node->id()];
+  if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
+  Alias alias = GetAlias(node->id());
   if (alias >= state->size()) return nullptr;
   return state->VirtualObjectFromAlias(alias);
 }
 
+bool EscapeAnalysis::ExistsVirtualAllocate() {
+  for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
+    Alias alias = GetAlias(static_cast<NodeId>(id));
+    if (alias < EscapeStatusAnalysis::kUntrackable) {
+      if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
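The CopyForModificationAt pair introduced above replaces the old LastChangedAt bookkeeping with a copy-on-write scheme: virtual states are forwarded by pointer along effect edges and only cloned when a node that does not own them wants to write. A minimal standalone model of that discipline; State, owner, and the arena are simplified stand-ins, not the real VirtualState API:

#include <memory>
#include <vector>

struct State {
  int owner;                // id of the node that created this state
  std::vector<int> fields;  // stand-in for per-object field values
};

// Returns a state that `node` may mutate: the input itself if this node
// already owns it, otherwise a fresh copy, leaving the forwarded original
// (and every other node's view of it) intact.
State* CopyForModificationAt(State* state, int node,
                             std::vector<std::unique_ptr<State>>* arena) {
  if (state->owner == node) return state;
  arena->push_back(std::make_unique<State>(*state));
  arena->back()->owner = node;
  return arena->back().get();
}

int main() {
  std::vector<std::unique_ptr<State>> arena;
  arena.push_back(std::make_unique<State>(State{1, {7}}));
  State* forwarded = arena.front().get();  // node 2 sees node 1's state
  State* writable = CopyForModificationAt(forwarded, 2, &arena);
  writable->fields[0] = 8;                 // node 1's snapshot is unchanged
  return forwarded->fields[0] == 7 ? 0 : 1;
}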
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index ea7b11e..c3f236d 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -18,34 +18,63 @@
 class VirtualState;
 class VirtualObject;
 
-
 // EscapeStatusAnalysis determines for each allocation whether it escapes.
 class EscapeStatusAnalysis {
  public:
+  typedef NodeId Alias;
   ~EscapeStatusAnalysis();
 
-  enum EscapeStatusFlag {
+  enum Status {
     kUnknown = 0u,
     kTracked = 1u << 0,
     kEscaped = 1u << 1,
     kOnStack = 1u << 2,
     kVisited = 1u << 3,
+    // A node is dangling if it is a load of some kind and does not have
+    // an effect successor.
+    kDanglingComputed = 1u << 4,
+    kDangling = 1u << 5,
+    // A node is an effect branch point if it has more than one non-dangling
+    // effect successor.
+    kBranchPointComputed = 1u << 6,
+    kBranchPoint = 1u << 7,
+    kInQueue = 1u << 8
   };
-  typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
+  typedef base::Flags<Status, uint16_t> StatusFlags;
 
-  void Run();
+  void RunStatusAnalysis();
 
   bool IsVirtual(Node* node);
   bool IsEscaped(Node* node);
   bool IsAllocation(Node* node);
 
+  bool IsInQueue(NodeId id);
+  void SetInQueue(NodeId id, bool in_queue);
+
   void DebugPrint();
 
-  friend class EscapeAnalysis;
-
- private:
   EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
                        Zone* zone);
+  void EnqueueForStatusAnalysis(Node* node);
+  bool SetEscaped(Node* node);
+  bool IsEffectBranchPoint(Node* node);
+  bool IsDanglingEffectNode(Node* node);
+  void ResizeStatusVector();
+  size_t GetStatusVectorSize();
+  bool IsVirtual(NodeId id);
+
+  Graph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
+  void AssignAliases();
+  Alias GetAlias(NodeId id) const { return aliases_[id]; }
+  const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+  Alias AliasCount() const { return next_free_alias_; }
+  static const Alias kNotReachable;
+  static const Alias kUntrackable;
+
+  bool IsNotReachable(Node* node);
+
+ private:
   void Process(Node* node);
   void ProcessAllocate(Node* node);
   void ProcessFinishRegion(Node* node);
@@ -57,38 +86,35 @@
   bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
   void RevisitUses(Node* node);
   void RevisitInputs(Node* node);
-  bool SetEscaped(Node* node);
+
+  Alias NextAlias() { return next_free_alias_++; }
+
   bool HasEntry(Node* node);
-  void Resize();
-  size_t size();
+
   bool IsAllocationPhi(Node* node);
 
-  Graph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
-
+  ZoneVector<Node*> stack_;
   EscapeAnalysis* object_analysis_;
   Graph* const graph_;
   Zone* const zone_;
-  ZoneVector<EscapeStatusFlags> status_;
-  ZoneDeque<Node*> queue_;
+  ZoneVector<StatusFlags> status_;
+  Alias next_free_alias_;
+  ZoneVector<Node*> status_stack_;
+  ZoneVector<Alias> aliases_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
 };
 
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
-
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
 
 // Forward Declaration.
 class MergeCache;
 
-
 // EscapeObjectAnalysis simulates stores to determine values of loads if
 // an object is virtual and eliminated.
 class EscapeAnalysis {
  public:
-  typedef NodeId Alias;
-
+  using Alias = EscapeStatusAnalysis::Alias;
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
 
@@ -99,10 +125,10 @@
   bool IsEscaped(Node* node);
   bool CompareVirtualObjects(Node* left, Node* right);
   Node* GetOrCreateObjectState(Node* effect, Node* node);
+  bool ExistsVirtualAllocate();
 
  private:
   void RunObjectAnalysis();
-  void AssignAliases();
   bool Process(Node* node);
   void ProcessLoadField(Node* node);
   void ProcessStoreField(Node* node);
@@ -118,13 +144,11 @@
                           VirtualState* states);
 
   void ForwardVirtualState(Node* node);
-  bool IsEffectBranchPoint(Node* node);
-  bool IsDanglingEffectNode(Node* node);
   int OffsetFromAccess(Node* node);
-
+  VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
+  VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
+                                       Node* node);
   VirtualObject* GetVirtualObject(Node* at, NodeId id);
-  VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
-  Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
 
   bool SetEscaped(Node* node);
   Node* replacement(NodeId id);
@@ -140,24 +164,26 @@
   void DebugPrintState(VirtualState* state);
   void DebugPrintObject(VirtualObject* state, Alias id);
 
-  Alias NextAlias() { return next_free_alias_++; }
-  Alias AliasCount() const { return next_free_alias_; }
-
-  Graph* graph() const { return graph_; }
+  Graph* graph() const { return status_analysis_.graph(); }
+  Zone* zone() const { return status_analysis_.zone(); }
   CommonOperatorBuilder* common() const { return common_; }
-  Zone* zone() const { return zone_; }
+  bool IsEffectBranchPoint(Node* node) {
+    return status_analysis_.IsEffectBranchPoint(node);
+  }
+  bool IsDanglingEffectNode(Node* node) {
+    return status_analysis_.IsDanglingEffectNode(node);
+  }
+  bool IsNotReachable(Node* node) {
+    return status_analysis_.IsNotReachable(node);
+  }
+  Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
+  Alias AliasCount() const { return status_analysis_.AliasCount(); }
 
-  static const Alias kNotReachable;
-  static const Alias kUntrackable;
-  Graph* const graph_;
+  EscapeStatusAnalysis status_analysis_;
   CommonOperatorBuilder* const common_;
-  Zone* const zone_;
   ZoneVector<VirtualState*> virtual_states_;
   ZoneVector<Node*> replacements_;
-  EscapeStatusAnalysis escape_status_;
   MergeCache* cache_;
-  ZoneVector<Alias> aliases_;
-  Alias next_free_alias_;
 
   DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
 };
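The new Status bits come in computed/value pairs (kDanglingComputed/kDangling, kBranchPointComputed/kBranchPoint), memoizing per-node predicates that were previously recomputed on every query. A sketch of the pattern, assuming nothing beyond the bit layout above:

#include <cstdint>
#include <vector>

enum Status : uint16_t {
  kDanglingComputed = 1u << 4,
  kDangling = 1u << 5,
};

// Computes "is this node dangling?" at most once, then serves the cached bit.
bool IsDangling(std::vector<uint16_t>* status, size_t id,
                bool (*compute)(size_t)) {
  uint16_t& s = (*status)[id];
  if (s & kDanglingComputed) return (s & kDangling) != 0;
  bool result = compute(id);  // the expensive walk over use edges
  s |= kDanglingComputed | (result ? kDangling : 0);
  return result;
}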
diff --git a/src/compiler/fast-accessor-assembler.cc b/src/compiler/fast-accessor-assembler.cc
index 09d513f..518003b 100644
--- a/src/compiler/fast-accessor-assembler.cc
+++ b/src/compiler/fast-accessor-assembler.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/fast-accessor-assembler.h"
 
 #include "src/base/logging.h"
+#include "src/code-stubs.h"  // For CallApiFunctionStub.
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline.h"
@@ -166,6 +167,46 @@
   assembler_->Bind(&pass);
 }
 
+FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
+    FunctionCallback callback_function, ValueId arg) {
+  CHECK_EQ(kBuilding, state_);
+
+  // Create API function stub.
+  CallApiFunctionStub stub(assembler_->isolate(), true);
+
+  // Wrap the FunctionCallback in an ExternalReference.
+  ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
+  ExternalReference callback(&callback_api_function,
+                             ExternalReference::DIRECT_API_CALL,
+                             assembler_->isolate());
+
+  // The stub has 5 register parameters, plus kStackParam (here: 1) stack
+  // parameter(s) to pass through to the callback.
+  // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
+  static const int kStackParam = 1;
+  Node* args[] = {
+      // Stub/register parameters:
+      assembler_->Parameter(0),                /* receiver (use accessor's) */
+      assembler_->UndefinedConstant(),         /* call_data (undefined) */
+      assembler_->NullConstant(),              /* holder (null) */
+      assembler_->ExternalConstant(callback),  /* API callback function */
+      assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
+
+      // kStackParam stack parameter(s):
+      FromId(arg),
+
+      // Context parameter. (See Linkage::GetStubCallDescriptor.)
+      assembler_->UndefinedConstant()};
+  CHECK_EQ(5 + kStackParam + 1, arraysize(args));
+
+  Node* call = assembler_->CallN(
+      Linkage::GetStubCallDescriptor(
+          assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+          kStackParam + stub.GetStackParameterCount(),
+          CallDescriptor::kNoFlags),
+      assembler_->HeapConstant(stub.GetCode()), args);
+  return FromRaw(call);
+}
 
 MaybeHandle<Code> FastAccessorAssembler::Build() {
   CHECK_EQ(kBuilding, state_);
@@ -176,9 +217,10 @@
 
   // Export the schedule and call the compiler.
   Schedule* schedule = assembler_->Export();
+  Code::Flags flags = Code::ComputeFlags(Code::STUB);
   MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
       assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
-      schedule, Code::STUB, "FastAccessorAssembler");
+      schedule, flags, "FastAccessorAssembler");
 
   // Update state & return.
   state_ = !code.is_null() ? kBuilt : kError;
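The CHECK_EQ in Call() above pins the layout of args[]: five stub/register parameters, kStackParam stack parameter(s) forwarded to the callback, and one trailing context input. Restating that arithmetic standalone, with names local to this sketch:

constexpr int kRegisterParams = 5;  // receiver, call_data, holder, callback, #args
constexpr int kStackParams = 1;     // JS argument(s) forwarded to the callback
constexpr int kContextParams = 1;   // context, consumed by the call descriptor

static_assert(kRegisterParams + kStackParams + kContextParams == 7,
              "one Node* per entry of the args[] array built in Call()");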
diff --git a/src/compiler/fast-accessor-assembler.h b/src/compiler/fast-accessor-assembler.h
index a9df3f0..1cb751d 100644
--- a/src/compiler/fast-accessor-assembler.h
+++ b/src/compiler/fast-accessor-assembler.h
@@ -48,6 +48,7 @@
  public:
   typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
   typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+  typedef v8::FunctionCallback FunctionCallback;
 
   explicit FastAccessorAssembler(Isolate* isolate);
   ~FastAccessorAssembler();
@@ -63,15 +64,13 @@
   void ReturnValue(ValueId value_id);
   void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
   void CheckNotZeroOrReturnNull(ValueId value_id);
-
-  // TODO(vogelheim): Implement a C++ callback.
-  //  void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
-  //     ValueId arg1, ValueId arg2, ...);
-
   LabelId MakeLabel();
   void SetLabel(LabelId label_id);
   void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
 
+  // C++ callback.
+  ValueId Call(FunctionCallback callback, ValueId arg);
+
   // Assemble the code.
   MaybeHandle<Code> Build();
 
diff --git a/src/compiler/frame-states.h b/src/compiler/frame-states.h
index ddb55c3..60ff9b5 100644
--- a/src/compiler/frame-states.h
+++ b/src/compiler/frame-states.h
@@ -83,31 +83,20 @@
 };
 
 
-enum ContextCallingMode {
-  CALL_MAINTAINS_NATIVE_CONTEXT,
-  CALL_CHANGES_NATIVE_CONTEXT
-};
-
-
 class FrameStateFunctionInfo {
  public:
   FrameStateFunctionInfo(FrameStateType type, int parameter_count,
                          int local_count,
-                         Handle<SharedFunctionInfo> shared_info,
-                         ContextCallingMode context_calling_mode)
+                         Handle<SharedFunctionInfo> shared_info)
       : type_(type),
         parameter_count_(parameter_count),
         local_count_(local_count),
-        shared_info_(shared_info),
-        context_calling_mode_(context_calling_mode) {}
+        shared_info_(shared_info) {}
 
   int local_count() const { return local_count_; }
   int parameter_count() const { return parameter_count_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   FrameStateType type() const { return type_; }
-  ContextCallingMode context_calling_mode() const {
-    return context_calling_mode_;
-  }
 
   static bool IsJSFunctionType(FrameStateType type) {
     return type == FrameStateType::kJavaScriptFunction ||
@@ -119,7 +108,6 @@
   int const parameter_count_;
   int const local_count_;
   Handle<SharedFunctionInfo> const shared_info_;
-  ContextCallingMode context_calling_mode_;
 };
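Call sites constructing FrameStateFunctionInfo drop the fifth argument along with the enum. A sketch of the updated call shape; CreateFrameStateFunctionInfo on CommonOperatorBuilder is an assumption here, and only the argument list matters:

const FrameStateFunctionInfo* function_info =
    common()->CreateFrameStateFunctionInfo(
        FrameStateType::kJavaScriptFunction, parameter_count, local_count,
        shared_info);  // no ContextCallingMode argument anymore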
 
 
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 72f756b..011a0f0 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -34,19 +34,10 @@
 //   determined after register allocation once the number of used callee-saved
 //   register is certain.
 //
-// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
-// two slots.
-//
-// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
-// the callee's saved return address and 1 corresponding to the saved frame
-// pointer. Some frames have additional information stored in the fixed header,
-// for example JSFunctions store the function context and marker in the fixed
-// header, with slot index 2 corresponding to the current function context and 3
-// corresponding to the frame marker/JSFunction. The frame region immediately
-// below the fixed header contains spill slots starting at 4 for JsFunctions.
-// The callee-saved frame region below that starts at 4+spill_slot_count_.
-// Callee stack slots corresponding to parameters are accessible through
-// negative slot ids.
+// The frame region immediately below the fixed header contains spill slots
+// starting at slot 4 for JSFunctions.  The callee-saved frame region below that
+// starts at 4+spill_slot_count_.  Callee stack slots corresponding to
+// parameters are accessible through negative slot ids.
 //
 // Every slot of a caller or callee frame is accessible by the register
 // allocator and gap resolver with a SpillSlotOperand containing its
@@ -76,13 +67,13 @@
 //       |- - - - - - - - -|   |                   frame slots
 //  ...  |      ...        | Spill slots           (slot >= 0)
 //       |- - - - - - - - -|   |                        |
-//  m+4  |    spill m      |   v                        |
+//  m+3  |    spill m      |   v                        |
 //       +-----------------+----                        |
-//  m+5  |  callee-saved 1 |   ^                        |
+//  m+4  |  callee-saved 1 |   ^                        |
 //       |- - - - - - - - -|   |                        |
 //       |      ...        | Callee-saved               |
 //       |- - - - - - - - -|   |                        |
-// m+r+4 |  callee-saved r |   v                        v
+// m+r+3 |  callee-saved r |   v                        v
 //  -----+-----------------+----- <-- stack ptr -------------
 //
 class Frame : public ZoneObject {
@@ -90,16 +81,6 @@
   explicit Frame(int fixed_frame_size_in_slots,
                  const CallDescriptor* descriptor);
 
-  static int FPOffsetToSlot(int frame_offset) {
-    return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
-           frame_offset / kPointerSize;
-  }
-
-  static int SlotToFPOffset(int slot) {
-    return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
-           kPointerSize;
-  }
-
   inline bool needs_frame() const { return needs_frame_; }
   inline void MarkNeedsFrame() { needs_frame_ = true; }
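The deleted FPOffsetToSlot/SlotToFPOffset helpers survive as free functions in src/frames.h; the ia32 code generator below already calls FPOffsetToFrameSlot. A worked model of the mapping, with the fixed-slot constant assumed to be 2 (return address plus saved fp) and the ia32 pointer size:

#include <cassert>

constexpr int kPointerSize = 4;            // ia32 assumption
constexpr int kFixedSlotCountAboveFp = 2;  // assumed value, see src/frames.h

constexpr int FPOffsetToFrameSlot(int frame_offset) {
  return kFixedSlotCountAboveFp - 1 - frame_offset / kPointerSize;
}

int main() {
  assert(FPOffsetToFrameSlot(4) == 0);   // saved return address -> slot 0
  assert(FPOffsetToFrameSlot(0) == 1);   // saved frame pointer  -> slot 1
  assert(FPOffsetToFrameSlot(-4) == 2);  // first slot below the fixed header
}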
 
diff --git a/src/compiler/graph-trimmer.cc b/src/compiler/graph-trimmer.cc
index 5fae425..75071c6 100644
--- a/src/compiler/graph-trimmer.cc
+++ b/src/compiler/graph-trimmer.cc
@@ -24,7 +24,8 @@
   MarkAsLive(graph()->end());
   // Compute transitive closure of live nodes.
   for (size_t i = 0; i < live_.size(); ++i) {
-    for (Node* const input : live_[i]->inputs()) MarkAsLive(input);
+    Node* const live = live_[i];
+    for (Node* const input : live->inputs()) MarkAsLive(input);
   }
   // Remove dead->live edges.
   for (Node* const live : live_) {
diff --git a/src/compiler/graph-trimmer.h b/src/compiler/graph-trimmer.h
index d8258be..98d335a 100644
--- a/src/compiler/graph-trimmer.h
+++ b/src/compiler/graph-trimmer.h
@@ -28,14 +28,18 @@
   // or any of the roots in the sequence [{begin},{end}[.
   template <typename ForwardIterator>
   void TrimGraph(ForwardIterator begin, ForwardIterator end) {
-    while (begin != end) MarkAsLive(*begin++);
+    while (begin != end) {
+      Node* const node = *begin++;
+      if (!node->IsDead()) MarkAsLive(node);
+    }
     TrimGraph();
   }
 
  private:
   V8_INLINE bool IsLive(Node* const node) { return is_live_.Get(node); }
   V8_INLINE void MarkAsLive(Node* const node) {
-    if (!node->IsDead() && !IsLive(node)) {
+    DCHECK(!node->IsDead());
+    if (!IsLive(node)) {
       is_live_.Set(node, true);
       live_.push_back(node);
     }
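The contract shifts here: MarkAsLive used to tolerate dead nodes, now it DCHECKs, and the ranged TrimGraph does the filtering instead. A sketch of a caller under that contract; the root nodes are hypothetical:

GraphTrimmer trimmer(zone, graph);
Node* roots[] = {frame_state, effect_phi};  // hypothetical live-root nodes
// Dead roots are skipped by the loop above; handing one straight to
// MarkAsLive would now trip the DCHECK.
trimmer.TrimGraph(&roots[0], &roots[arraysize(roots)]);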
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
index 3d4d6da..ba69617 100644
--- a/src/compiler/graph.cc
+++ b/src/compiler/graph.cc
@@ -42,17 +42,15 @@
   decorators_.erase(it);
 }
 
-
-Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
+Node* Graph::NewNode(const Operator* op, int input_count, Node* const* inputs,
                      bool incomplete) {
   Node* node = NewNodeUnchecked(op, input_count, inputs, incomplete);
   Verifier::VerifyNode(node);
   return node;
 }
 
-
 Node* Graph::NewNodeUnchecked(const Operator* op, int input_count,
-                              Node** inputs, bool incomplete) {
+                              Node* const* inputs, bool incomplete) {
   Node* const node =
       Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
   Decorate(node);
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index b53c7fd..958a15d 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -34,16 +34,16 @@
   explicit Graph(Zone* zone);
 
   // Base implementation used by all factory methods.
-  Node* NewNodeUnchecked(const Operator* op, int input_count, Node** inputs,
-                         bool incomplete = false);
+  Node* NewNodeUnchecked(const Operator* op, int input_count,
+                         Node* const* inputs, bool incomplete = false);
 
   // Factory that checks the input count.
-  Node* NewNode(const Operator* op, int input_count, Node** inputs,
+  Node* NewNode(const Operator* op, int input_count, Node* const* inputs,
                 bool incomplete = false);
 
   // Factories for nodes with static input counts.
   Node* NewNode(const Operator* op) {
-    return NewNode(op, 0, static_cast<Node**>(nullptr));
+    return NewNode(op, 0, static_cast<Node* const*>(nullptr));
   }
   Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
   Node* NewNode(const Operator* op, Node* n1, Node* n2) {
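Widening Node** to Node* const* is purely a const-correctness fix: every Node** still converts implicitly, const-qualified input arrays become usable, and the signature now documents that NewNode never reseats the caller's pointers. A toy demonstration without any V8 types:

struct Node {};

void NewNode(int input_count, Node* const* inputs) { /* reads inputs only */ }

int main() {
  Node a, b;
  Node* mutable_inputs[] = {&a, &b};
  Node* const const_inputs[] = {&a, &b};
  NewNode(2, mutable_inputs);  // Node** converts to Node* const*
  NewNode(2, const_inputs);    // ill-formed with the old Node** signature
}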
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index f63bc22..1f61af8 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/frames.h"
 #include "src/ia32/assembler-ia32.h"
 #include "src/ia32/frames-ia32.h"
 #include "src/ia32/macro-assembler-ia32.h"
@@ -56,7 +57,7 @@
 
   Operand ToMaterializableOperand(int materializable_offset) {
     FrameOffset offset = frame_access_state()->GetFrameOffset(
-        Frame::FPOffsetToSlot(materializable_offset));
+        FPOffsetToFrameSlot(materializable_offset));
     return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
   }
 
@@ -241,15 +242,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ lea(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -413,11 +415,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -471,6 +468,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), ebp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ mov(i.OutputRegister(), Operand(ebp, 0));
+      } else {
+        __ mov(i.OutputRegister(), ebp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -499,6 +503,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = esp;
+      } else {
+        base = ebp;
+      }
+      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -514,17 +530,37 @@
       }
       break;
     case kIA32Cmp:
-      if (HasImmediateInput(instr, 1)) {
-        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ cmp(operand, i.InputImmediate(index));
+        } else {
+          __ cmp(operand, i.InputRegister(index));
+        }
       } else {
-        __ cmp(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ cmp(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ cmp(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kIA32Test:
-      if (HasImmediateInput(instr, 1)) {
-        __ test(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ test(operand, i.InputImmediate(index));
+        } else {
+          __ test(i.InputRegister(index), operand);
+        }
       } else {
-        __ test(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ test(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ test(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kIA32Imul:
@@ -739,6 +775,21 @@
     case kSSEFloat64ToFloat32:
       __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat32ToInt32:
+      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat32ToUint32: {
+      Label success;
+      __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      __ test(i.OutputRegister(), i.OutputRegister());
+      __ j(positive, &success);
+      __ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
+      __ addss(kScratchDoubleReg, i.InputOperand(0));
+      __ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
+      __ or_(i.OutputRegister(), Immediate(0x80000000));
+      __ bind(&success);
+      break;
+    }
     case kSSEFloat64ToInt32:
       __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
       break;
@@ -749,6 +800,16 @@
       __ add(i.OutputRegister(), Immediate(0x80000000));
       break;
     }
+    case kSSEInt32ToFloat32:
+      __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEUint32ToFloat32: {
+      Register scratch0 = i.TempRegister(0);
+      Register scratch1 = i.TempRegister(1);
+      __ mov(scratch0, i.InputOperand(0));
+      __ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
+      break;
+    }
     case kSSEInt32ToFloat64:
       __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
@@ -1441,8 +1502,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
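The kSSEFloat32ToUint32 sequence added in code-generator-ia32.cc above works around the fact that cvttss2si only covers the signed int32 range: inputs at or above 2^31 are first rebased by -2^31, converted, and then bit 31 is restored with an OR. A minimal C++ model of the trick (plain functions, not V8 code; Cvttss2si is a stand-in for the instruction, which returns the "integer indefinite" value 0x80000000 on overflow or NaN):

```cpp
#include <cstdint>

// Emulates cvttss2si: truncating float->int32 conversion that yields
// INT32_MIN (0x80000000, the x86 "integer indefinite") on overflow or NaN.
int32_t Cvttss2si(float v) {
  if (!(v >= -2147483648.0f && v < 2147483648.0f)) return INT32_MIN;
  return static_cast<int32_t>(v);
}

// The float32 -> uint32 truncation built from the signed instruction.
uint32_t Float32ToUint32(float input) {
  int32_t result = Cvttss2si(input);
  if (result >= 0) return static_cast<uint32_t>(result);  // j(positive, ...)
  // Input was >= 2^31 (or invalid): rebase by -2^31 into the signed range,
  // convert, then restore bit 31 -- the addss / or_(0x80000000) pair above.
  float rebased = input + static_cast<float>(INT32_MIN);
  return static_cast<uint32_t>(Cvttss2si(rebased)) | 0x80000000u;
}
```

For example, Float32ToUint32(3.0e9f) takes the slow path (3e9 >= 2^31), converts 3e9 - 2^31 in the signed range, and ORs the top bit back in.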
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 816487d..61fd035 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -58,8 +58,12 @@
   V(SSEFloat64Round)               \
   V(SSEFloat32ToFloat64)           \
   V(SSEFloat64ToFloat32)           \
+  V(SSEFloat32ToInt32)             \
+  V(SSEFloat32ToUint32)            \
   V(SSEFloat64ToInt32)             \
   V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat32)             \
+  V(SSEUint32ToFloat32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
   V(SSEFloat64ExtractLowWord32)    \
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 0a8fcac..093bc22 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -61,8 +61,12 @@
     case kSSEFloat64Round:
     case kSSEFloat32ToFloat64:
     case kSSEFloat64ToFloat32:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat32ToUint32:
     case kSSEFloat64ToInt32:
     case kSSEFloat64ToUint32:
+    case kSSEInt32ToFloat32:
+    case kSSEUint32ToFloat32:
     case kSSEInt32ToFloat64:
     case kSSEUint32ToFloat64:
     case kSSEFloat64ExtractLowWord32:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 0906452..f649ba9 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -190,7 +190,8 @@
     case MachineRepresentation::kWord32:
       opcode = kIA32Movl;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -275,7 +276,8 @@
       case MachineRepresentation::kWord32:
         opcode = kIA32Movl;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -327,9 +329,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -373,9 +376,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -508,9 +512,10 @@
 void VisitMulHigh(InstructionSelector* selector, Node* node,
                   ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(eax)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 
@@ -525,9 +530,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
+  InstructionOperand temps[] = {g.TempRegister(eax)};
   selector->Emit(opcode, g.DefineAsFixed(node, edx),
                  g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
 }
 
 void EmitLea(InstructionSelector* selector, Node* result, Node* index,
@@ -591,6 +597,9 @@
 }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   IA32OperandGenerator g(this);
   Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -695,6 +704,19 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRO(this, node, kSSEInt32ToFloat32);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  IA32OperandGenerator g(this);
+  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+       arraysize(temps), temps);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRO(this, node, kSSEInt32ToFloat64);
 }
@@ -705,6 +727,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRO(this, node, kSSEFloat32ToInt32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRO(this, node, kSSEFloat32ToUint32);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   VisitRO(this, node, kSSEFloat64ToInt32);
 }
@@ -958,6 +990,46 @@
 
 namespace {
 
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  IA32OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation load_representation =
+      LoadRepresentationOf(input->op()).representation();
+  if (load_representation == MachineRepresentation::kWord32 ||
+      load_representation == MachineRepresentation::kTagged) {
+    return opcode == kIA32Cmp || opcode == kIA32Test;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1003,26 +1075,41 @@
   VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   IA32OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {
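The reworked VisitWordCompare above first normalizes operand order (immediates to the right, commuting the continuation when needed) before trying to fold a covered load into the compare. A toy model of that normalization step (invented enum and struct, not the V8 types):

```cpp
#include <utility>

// Immediates are moved to the right-hand side, and the condition is
// commuted to compensate -- this is what cont->Commute() does for
// non-commutative comparisons.
enum Cond { kLessThan, kGreaterThan, kEqual };

Cond Commute(Cond c) {
  if (c == kLessThan) return kGreaterThan;
  if (c == kGreaterThan) return kLessThan;
  return kEqual;  // equality is commutative, nothing to flip
}

struct Operand {
  bool is_immediate;
  int value;  // immediate value or virtual register number
};

void CanonicalizeComparison(Operand& left, Operand& right, Cond& cond) {
  if (!right.is_immediate && left.is_immediate) {
    std::swap(left, right);  // put the immediate on the right ...
    cond = Commute(cond);    // ... and flip `<` to `>` etc. to compensate
  }
}
```

Canonicalizing first means the memory-operand path only has to handle the "load on the left" shape, e.g. `5 < x` becomes `x > 5` before matching.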
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 6c31ac8..d2144cf 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -47,7 +47,6 @@
   V(ArchPrepareCallCFunction)      \
   V(ArchCallCFunction)             \
   V(ArchPrepareTailCall)           \
-  V(ArchLazyBailout)               \
   V(ArchJmp)                       \
   V(ArchLookupSwitch)              \
   V(ArchTableSwitch)               \
@@ -57,6 +56,7 @@
   V(ArchRet)                       \
   V(ArchStackPointer)              \
   V(ArchFramePointer)              \
+  V(ArchParentFramePointer)        \
   V(ArchTruncateDoubleToI)         \
   V(ArchStoreWithWriteBarrier)     \
   V(CheckedLoadInt8)               \
@@ -72,7 +72,8 @@
   V(CheckedStoreWord32)            \
   V(CheckedStoreWord64)            \
   V(CheckedStoreFloat32)           \
-  V(CheckedStoreFloat64)
+  V(CheckedStoreFloat64)           \
+  V(ArchStackSlot)
 
 #define ARCH_OPCODE_LIST(V)  \
   COMMON_ARCH_OPCODE_LIST(V) \
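The new kArchStackSlot opcode added to the list above is implemented in the ia32 code generator earlier in this patch with a single `lea`. A toy model of what it computes (invented types, not the V8 frame machinery):

```cpp
#include <cstdint>

// The frame access state resolves a slot index to a base register (esp or
// ebp) plus a byte offset; `lea` materializes that address in a register.
struct FrameOffset {
  bool from_stack_pointer;
  int offset;
};

uintptr_t StackSlotAddress(uintptr_t esp, uintptr_t ebp, FrameOffset slot) {
  uintptr_t base = slot.from_stack_pointer ? esp : ebp;
  return base + slot.offset;  // lea OutputRegister, [base + offset]
}
```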
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index 2f329ea..adbfd5d 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -5,11 +5,57 @@
 #include "src/compiler/instruction-scheduler.h"
 
 #include "src/base/adapters.h"
+#include "src/base/utils/random-number-generator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Compare the two nodes and return true if node1 is a better candidate than
+// node2 (i.e. node1 should be scheduled before node2).
+bool InstructionScheduler::CriticalPathFirstQueue::CompareNodes(
+    ScheduleGraphNode *node1, ScheduleGraphNode *node2) const {
+  return node1->total_latency() > node2->total_latency();
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
+  DCHECK(!IsEmpty());
+  auto candidate = nodes_.end();
+  for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
+    // We only consider instructions that have all their operands ready and
+    // we try to schedule the critical path first.
+    if (cycle >= (*iterator)->start_cycle()) {
+      if ((candidate == nodes_.end()) || CompareNodes(*iterator, *candidate)) {
+        candidate = iterator;
+      }
+    }
+  }
+
+  if (candidate != nodes_.end()) {
+    ScheduleGraphNode *result = *candidate;
+    nodes_.erase(candidate);
+    return result;
+  }
+
+  return nullptr;
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
+  DCHECK(!IsEmpty());
+  // Choose a random element from the ready list.
+  auto candidate = nodes_.begin();
+  std::advance(candidate, isolate()->random_number_generator()->NextInt(
+      static_cast<int>(nodes_.size())));
+  ScheduleGraphNode *result = *candidate;
+  nodes_.erase(candidate);
+  return result;
+}
+
+
 InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
     Zone* zone,
     Instruction* instr)
@@ -50,7 +96,11 @@
 
 
 void InstructionScheduler::EndBlock(RpoNumber rpo) {
-  ScheduleBlock();
+  if (FLAG_turbo_stress_instruction_scheduling) {
+    ScheduleBlock<StressSchedulerQueue>();
+  } else {
+    ScheduleBlock<CriticalPathFirstQueue>();
+  }
   sequence()->EndBlock(rpo);
   graph_.clear();
   last_side_effect_instr_ = nullptr;
@@ -110,14 +160,9 @@
 }
 
 
-bool InstructionScheduler::CompareNodes(ScheduleGraphNode *node1,
-                                        ScheduleGraphNode *node2) const {
-  return node1->total_latency() > node2->total_latency();
-}
-
-
+template <typename QueueType>
 void InstructionScheduler::ScheduleBlock() {
-  ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+  QueueType ready_list(this);
 
   // Compute total latencies so that we can schedule the critical path first.
   ComputeTotalLatencies();
@@ -125,43 +170,28 @@
   // Add nodes which don't have dependencies to the ready list.
   for (auto node : graph_) {
     if (!node->HasUnscheduledPredecessor()) {
-      ready_list.push_back(node);
+      ready_list.AddNode(node);
     }
   }
 
   // Go through the ready list and schedule the instructions.
   int cycle = 0;
-  while (!ready_list.empty()) {
-    auto candidate = ready_list.end();
-    for (auto iterator = ready_list.begin(); iterator != ready_list.end();
-         ++iterator) {
-      // Look for the best candidate to schedule.
-      // We only consider instructions that have all their operands ready and
-      // we try to schedule the critical path first (we look for the instruction
-      // with the highest latency on the path to reach the end of the graph).
-      if (cycle >= (*iterator)->start_cycle()) {
-        if ((candidate == ready_list.end()) ||
-            CompareNodes(*iterator, *candidate)) {
-          candidate = iterator;
-        }
-      }
-    }
+  while (!ready_list.IsEmpty()) {
+    auto candidate = ready_list.PopBestCandidate(cycle);
 
-    if (candidate != ready_list.end()) {
-      sequence()->AddInstruction((*candidate)->instruction());
+    if (candidate != nullptr) {
+      sequence()->AddInstruction(candidate->instruction());
 
-      for (auto successor : (*candidate)->successors()) {
+      for (auto successor : candidate->successors()) {
         successor->DropUnscheduledPredecessor();
         successor->set_start_cycle(
             std::max(successor->start_cycle(),
-                     cycle + (*candidate)->latency()));
+                     cycle + candidate->latency()));
 
         if (!successor->HasUnscheduledPredecessor()) {
-          ready_list.push_back(successor);
+          ready_list.AddNode(successor);
         }
       }
-
-      ready_list.erase(candidate);
     }
 
     cycle++;
@@ -172,17 +202,22 @@
 int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
   switch (instr->arch_opcode()) {
     case kArchNop:
-    case kArchStackPointer:
     case kArchFramePointer:
+    case kArchParentFramePointer:
     case kArchTruncateDoubleToI:
+    case kArchStackSlot:
       return kNoOpcodeFlags;
 
+    case kArchStackPointer:
+      // The ArchStackPointer instruction loads the current stack pointer
+      // value and must not be reordered with instructions with side effects.
+      return kIsLoadOperation;
+
     case kArchPrepareCallCFunction:
     case kArchPrepareTailCall:
     case kArchCallCFunction:
     case kArchCallCodeObject:
     case kArchCallJSFunction:
-    case kArchLazyBailout:
       return kHasSideEffect;
 
     case kArchTailCallCodeObject:
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index fafbe47..104c0b9 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -90,11 +90,66 @@
     int start_cycle_;
   };
 
-  // Compare the two nodes and return true if node1 is a better candidate than
-  // node2 (i.e. node1 should be scheduled before node2).
-  bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+  // Keep track of all nodes ready to be scheduled (i.e. all their
+  // dependencies have been scheduled). Note that this class is intended to
+  // be extended by concrete implementations of the scheduling queue, which
+  // define the policy used to pop a node from the queue.
+  class SchedulingQueueBase {
+   public:
+    explicit SchedulingQueueBase(InstructionScheduler* scheduler)
+      : scheduler_(scheduler),
+        nodes_(scheduler->zone()) {
+    }
 
-  // Perform scheduling for the current block.
+    void AddNode(ScheduleGraphNode* node) {
+      nodes_.push_back(node);
+    }
+
+    bool IsEmpty() const {
+      return nodes_.empty();
+    }
+
+   protected:
+    InstructionScheduler* scheduler_;
+    ZoneLinkedList<ScheduleGraphNode*> nodes_;
+  };
+
+  // A scheduling queue which prioritizes nodes on the critical path (we look
+  // for the instruction with the highest latency on the path to reach the end
+  // of the graph).
+  class CriticalPathFirstQueue : public SchedulingQueueBase  {
+   public:
+    explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
+      : SchedulingQueueBase(scheduler) { }
+
+    // Look for the best candidate to schedule, remove it from the queue and
+    // return it.
+    ScheduleGraphNode* PopBestCandidate(int cycle);
+
+   private:
+    // Compare the two nodes and return true if node1 is a better candidate than
+    // node2 (i.e. node1 should be scheduled before node2).
+    bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+  };
+
+  // A queue which pops a random node from the queue to perform stress tests
+  // on the scheduler.
+  class StressSchedulerQueue : public SchedulingQueueBase  {
+   public:
+    explicit StressSchedulerQueue(InstructionScheduler* scheduler)
+      : SchedulingQueueBase(scheduler) { }
+
+    ScheduleGraphNode* PopBestCandidate(int cycle);
+
+   private:
+    Isolate *isolate() {
+      return scheduler_->isolate();
+    }
+  };
+
+  // Perform scheduling for the current block specifying the queue type to
+  // use to determine the next best candidate.
+  template <typename QueueType>
   void ScheduleBlock();
 
   // Return the scheduling properties of the given instruction.
@@ -134,6 +189,7 @@
 
   Zone* zone() { return zone_; }
   InstructionSequence* sequence() { return sequence_; }
+  Isolate* isolate() { return sequence()->isolate(); }
 
   Zone* zone_;
   InstructionSequence* sequence_;
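With the templated ScheduleBlock<QueueType>() above, the list scheduler only needs AddNode/IsEmpty/PopBestCandidate from its queue, so the selection policy is swappable. A condensed sketch of the CriticalPathFirstQueue policy (simplified Node type, not the V8 classes); StressSchedulerQueue keeps the same interface but pops a random ready element instead:

```cpp
#include <list>

struct Node {
  int start_cycle;    // earliest cycle at which all inputs are available
  int total_latency;  // latency of the longest path from here to the end
};

class CriticalPathFirstQueue {
 public:
  void AddNode(Node* node) { nodes_.push_back(node); }
  bool IsEmpty() const { return nodes_.empty(); }

  // Among nodes ready at this cycle, pop the one with the longest remaining
  // path to the end of the graph (the critical path).
  Node* PopBestCandidate(int cycle) {
    auto best = nodes_.end();
    for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
      if (cycle >= (*it)->start_cycle &&
          (best == nodes_.end() ||
           (*it)->total_latency > (*best)->total_latency)) {
        best = it;
      }
    }
    if (best == nodes_.end()) return nullptr;  // nothing ready this cycle
    Node* result = *best;
    nodes_.erase(best);
    return result;
  }

 private:
  std::list<Node*> nodes_;
};
```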
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 86868e5..0f27e50 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -21,7 +21,7 @@
 InstructionSelector::InstructionSelector(
     Zone* zone, size_t node_count, Linkage* linkage,
     InstructionSequence* sequence, Schedule* schedule,
-    SourcePositionTable* source_positions,
+    SourcePositionTable* source_positions, Frame* frame,
     SourcePositionMode source_position_mode, Features features)
     : zone_(zone),
       linkage_(linkage),
@@ -34,9 +34,11 @@
       instructions_(zone),
       defined_(node_count, false, zone),
       used_(node_count, false, zone),
+      effect_level_(node_count, 0, zone),
       virtual_registers_(node_count,
                          InstructionOperand::kInvalidVirtualRegister, zone),
-      scheduler_(nullptr) {
+      scheduler_(nullptr),
+      frame_(frame) {
   instructions_.reserve(node_count);
 }
 
@@ -217,10 +219,11 @@
 
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
   return node->OwnedBy(user) &&
-         schedule()->block(node) == schedule()->block(user);
+         schedule()->block(node) == schedule()->block(user) &&
+         (node->op()->HasProperty(Operator::kPure) ||
+          GetEffectLevel(node) == GetEffectLevel(user));
 }
 
-
 int InstructionSelector::GetVirtualRegister(const Node* node) {
   DCHECK_NOT_NULL(node);
   size_t const id = node->id();
@@ -279,6 +282,19 @@
   used_[id] = true;
 }
 
+int InstructionSelector::GetEffectLevel(Node* node) const {
+  DCHECK_NOT_NULL(node);
+  size_t const id = node->id();
+  DCHECK_LT(id, effect_level_.size());
+  return effect_level_[id];
+}
+
+void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
+  DCHECK_NOT_NULL(node);
+  size_t const id = node->id();
+  DCHECK_LT(id, effect_level_.size());
+  effect_level_[id] = effect_level;
+}
 
 void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
                                                const InstructionOperand& op) {
@@ -567,10 +583,6 @@
           g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
                         buffer->descriptor->GetInputType(0).representation()));
       break;
-    case CallDescriptor::kLazyBailout:
-      // The target is ignored, but we still need to pass a value here.
-      buffer->instruction_args.push_back(g.UseImmediate(callee));
-      break;
   }
   DCHECK_EQ(1u, buffer->instruction_args.size());
 
@@ -581,13 +593,29 @@
   size_t frame_state_entries = 0;
   USE(frame_state_entries);  // frame_state_entries is only used for debug.
   if (buffer->frame_state_descriptor != nullptr) {
+    Node* frame_state =
+        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+
+    // If it was a syntactic tail call we need to drop the current frame and
+    // an arguments adaptor frame on top of it (if the latter is present).
+    if (buffer->descriptor->SupportsTailCalls()) {
+      frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+      buffer->frame_state_descriptor =
+          buffer->frame_state_descriptor->outer_state();
+
+      if (buffer->frame_state_descriptor != nullptr &&
+          buffer->frame_state_descriptor->type() ==
+              FrameStateType::kArgumentsAdaptor) {
+        frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+        buffer->frame_state_descriptor =
+            buffer->frame_state_descriptor->outer_state();
+      }
+    }
+
     InstructionSequence::StateId state_id =
         sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
     buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
 
-    Node* frame_state =
-        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
-
     StateObjectDeduplicator deduplicator(instruction_zone());
 
     frame_state_entries =
@@ -656,6 +684,16 @@
   current_block_ = block;
   int current_block_end = static_cast<int>(instructions_.size());
 
+  int effect_level = 0;
+  for (Node* const node : *block) {
+    if (node->opcode() == IrOpcode::kStore ||
+        node->opcode() == IrOpcode::kCheckedStore ||
+        node->opcode() == IrOpcode::kCall) {
+      ++effect_level;
+    }
+    SetEffectLevel(node, effect_level);
+  }
+
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
   VisitControl(block);
@@ -767,7 +805,7 @@
       DCHECK_EQ(IrOpcode::kThrow, input->opcode());
       return VisitThrow(input->InputAt(0));
     case BasicBlock::kNone: {
-      // TODO(titzer): exit block doesn't have control.
+      // Exit block doesn't have control.
       DCHECK_NULL(input);
       break;
     }
@@ -866,6 +904,8 @@
       return MarkAsWord32(node), VisitWord32Clz(node);
     case IrOpcode::kWord32Ctz:
       return MarkAsWord32(node), VisitWord32Ctz(node);
+    case IrOpcode::kWord32ReverseBits:
+      return MarkAsWord32(node), VisitWord32ReverseBits(node);
     case IrOpcode::kWord32Popcnt:
       return MarkAsWord32(node), VisitWord32Popcnt(node);
     case IrOpcode::kWord64Popcnt:
@@ -888,6 +928,8 @@
       return MarkAsWord64(node), VisitWord64Clz(node);
     case IrOpcode::kWord64Ctz:
       return MarkAsWord64(node), VisitWord64Ctz(node);
+    case IrOpcode::kWord64ReverseBits:
+      return MarkAsWord64(node), VisitWord64ReverseBits(node);
     case IrOpcode::kWord64Equal:
       return VisitWord64Equal(node);
     case IrOpcode::kInt32Add:
@@ -956,6 +998,10 @@
       return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
     case IrOpcode::kChangeFloat64ToUint32:
       return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kTruncateFloat32ToInt32:
+      return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
+    case IrOpcode::kTruncateFloat32ToUint32:
+      return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
     case IrOpcode::kTryTruncateFloat32ToInt64:
       return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
     case IrOpcode::kTryTruncateFloat64ToInt64:
@@ -976,10 +1022,14 @@
       return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
     case IrOpcode::kRoundInt64ToFloat32:
       return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+    case IrOpcode::kRoundInt32ToFloat32:
+      return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
     case IrOpcode::kRoundInt64ToFloat64:
       return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
     case IrOpcode::kBitcastFloat32ToInt32:
       return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+    case IrOpcode::kRoundUint32ToFloat32:
+      return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
     case IrOpcode::kRoundUint64ToFloat32:
       return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
     case IrOpcode::kRoundUint64ToFloat64:
@@ -1062,10 +1112,14 @@
       return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
     case IrOpcode::kFloat64InsertHighWord32:
       return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+    case IrOpcode::kStackSlot:
+      return VisitStackSlot(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
     case IrOpcode::kLoadFramePointer:
       return VisitLoadFramePointer(node);
+    case IrOpcode::kLoadParentFramePointer:
+      return VisitLoadParentFramePointer(node);
     case IrOpcode::kCheckedLoad: {
       MachineRepresentation rep =
           CheckedLoadRepresentationOf(node->op()).representation();
@@ -1090,9 +1144,14 @@
 
 void InstructionSelector::VisitLoadFramePointer(Node* node) {
   OperandGenerator g(this);
+  frame_->MarkNeedsFrame();
   Emit(kArchFramePointer, g.DefineAsRegister(node));
 }
 
+void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchParentFramePointer, g.DefineAsRegister(node));
+}
 
 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
                                           InstructionOperand& index_operand) {
@@ -1129,6 +1188,14 @@
   Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
 }
 
+void InstructionSelector::VisitStackSlot(Node* node) {
+  int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+  int slot = frame_->AllocateSpillSlot(size);
+  OperandGenerator g(this);
+
+  Emit(kArchStackSlot, g.DefineAsRegister(node),
+       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
 
 // 32 bit targets do not implement the following instructions.
 #if V8_TARGET_ARCH_32_BIT
@@ -1160,6 +1227,11 @@
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
 
 
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
 
 
@@ -1412,6 +1484,13 @@
     buffer.instruction_args.push_back(g.Label(handler));
   }
 
+  // (arm64 only) caller uses JSSP but callee might destroy it.
+  if (descriptor->UseNativeStack() &&
+      !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
+    flags |= CallDescriptor::kRestoreJSSP;
+  }
+
   // Select the appropriate opcode based on the call type.
   InstructionCode opcode = kArchNop;
   switch (descriptor->kind()) {
@@ -1426,9 +1505,6 @@
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction | MiscField::encode(flags);
       break;
-    case CallDescriptor::kLazyBailout:
-      opcode = kArchLazyBailout | MiscField::encode(flags);
-      break;
   }
 
   // Emit the call instruction.
@@ -1585,7 +1661,7 @@
 
 void InstructionSelector::VisitThrow(Node* value) {
   OperandGenerator g(this);
-  Emit(kArchThrowTerminator, g.NoOutput());  // TODO(titzer)
+  Emit(kArchThrowTerminator, g.NoOutput());
 }
 
 
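The effect levels introduced in instruction-selector.cc above gate the strengthened CanCover(): a non-pure node such as a load may only be folded into a user on the same effect level, so it is never moved across a write that might clobber it. A minimal sketch of the bookkeeping, assuming a flat opcode list instead of IR nodes:

```cpp
#include <vector>

enum Op { kLoad, kStore, kCall, kAdd };

// The level is bumped at each store, checked store, or call, mirroring the
// loop added to VisitBlock; each node records the level it was visited at.
std::vector<int> ComputeEffectLevels(const std::vector<Op>& block) {
  std::vector<int> levels;
  int level = 0;
  for (Op op : block) {
    if (op == kStore || op == kCall) ++level;  // side effect: new level
    levels.push_back(level);
  }
  return levels;
}
// For {kLoad, kStore, kLoad} this yields {0, 1, 1}: a compare emitted after
// the store may use the second load as a memory operand, but not the first.
```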
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 52aea70..a01cab4 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -52,7 +52,7 @@
   InstructionSelector(
       Zone* zone, size_t node_count, Linkage* linkage,
       InstructionSequence* sequence, Schedule* schedule,
-      SourcePositionTable* source_positions,
+      SourcePositionTable* source_positions, Frame* frame,
       SourcePositionMode source_position_mode = kCallSourcePositions,
       Features features = SupportedFeatures());
 
@@ -149,6 +149,9 @@
   // Checks if {node} is currently live.
   bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
 
+  // Gets the effect level of {node}.
+  int GetEffectLevel(Node* node) const;
+
   int GetVirtualRegister(const Node* node);
   const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
 
@@ -168,6 +171,9 @@
   // will need to generate code for it.
   void MarkAsUsed(Node* node);
 
+  // Sets the effect level of {node}.
+  void SetEffectLevel(Node* node, int effect_level);
+
   // Inform the register allocation of the representation of the value produced
   // by {node}.
   void MarkAsRepresentation(MachineRepresentation rep, Node* node);
@@ -269,8 +275,10 @@
   ZoneVector<Instruction*> instructions_;
   BoolVector defined_;
   BoolVector used_;
+  IntVector effect_level_;
   IntVector virtual_registers_;
   InstructionScheduler* scheduler_;
+  Frame* frame_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 383e27d..d4ec6bc 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -164,6 +164,9 @@
         case MachineRepresentation::kFloat64:
           os << "|f64";
           break;
+        case MachineRepresentation::kSimd128:
+          os << "|s128";
+          break;
         case MachineRepresentation::kTagged:
           os << "|t";
           break;
@@ -615,6 +618,20 @@
   return blocks;
 }
 
+void InstructionSequence::Validate() {
+  // Validate that blocks are in edge-split form: no block with multiple
+  // successors has an edge to a block (== a successor) with more than one
+  // predecessor.
+  for (const InstructionBlock* block : instruction_blocks()) {
+    if (block->SuccessorCount() > 1) {
+      for (const RpoNumber& successor_id : block->successors()) {
+        const InstructionBlock* successor = InstructionBlockAt(successor_id);
+        // Expect precisely one predecessor: "block".
+        CHECK(successor->PredecessorCount() == 1 &&
+              successor->predecessors()[0] == block->rpo_number());
+      }
+    }
+  }
+}
 
 void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
   int ao = 0;
@@ -648,6 +665,10 @@
       representations_(zone()),
       deoptimization_entries_(zone()) {
   block_starts_.reserve(instruction_blocks_->size());
+
+#if DEBUG
+  Validate();
+#endif
 }
 
 
@@ -726,6 +747,7 @@
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kFloat64:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kTagged:
       return rep;
     case MachineRepresentation::kNone:
@@ -819,6 +841,62 @@
   Print(config);
 }
 
+void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
+                                     int block_id) const {
+  OFStream os(stdout);
+  RpoNumber rpo = RpoNumber::FromInt(block_id);
+  const InstructionBlock* block = InstructionBlockAt(rpo);
+  CHECK(block->rpo_number() == rpo);
+
+  os << "B" << block->rpo_number();
+  os << ": AO#" << block->ao_number();
+  if (block->IsDeferred()) os << " (deferred)";
+  if (!block->needs_frame()) os << " (no frame)";
+  if (block->must_construct_frame()) os << " (construct frame)";
+  if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
+  if (block->IsLoopHeader()) {
+    os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
+       << ")";
+  }
+  os << "  instructions: [" << block->code_start() << ", " << block->code_end()
+     << ")\n  predecessors:";
+
+  for (auto pred : block->predecessors()) {
+    os << " B" << pred.ToInt();
+  }
+  os << "\n";
+
+  for (auto phi : block->phis()) {
+    PrintableInstructionOperand printable_op = {config, phi->output()};
+    os << "     phi: " << printable_op << " =";
+    for (auto input : phi->operands()) {
+      os << " v" << input;
+    }
+    os << "\n";
+  }
+
+  ScopedVector<char> buf(32);
+  PrintableInstruction printable_instr;
+  printable_instr.register_configuration_ = config;
+  for (int j = block->first_instruction_index();
+       j <= block->last_instruction_index(); j++) {
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    SNPrintF(buf, "%5d", j);
+    printable_instr.instr_ = InstructionAt(j);
+    os << "   " << buf.start() << ": " << printable_instr << "\n";
+  }
+
+  for (auto succ : block->successors()) {
+    os << " B" << succ.ToInt();
+  }
+  os << "\n";
+}
+
+void InstructionSequence::PrintBlock(int block_id) const {
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+  PrintBlock(config, block_id);
+}
 
 FrameStateDescriptor::FrameStateDescriptor(
     Zone* zone, FrameStateType type, BailoutId bailout_id,
@@ -901,53 +979,7 @@
     os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
   }
   for (int i = 0; i < code.InstructionBlockCount(); i++) {
-    RpoNumber rpo = RpoNumber::FromInt(i);
-    const InstructionBlock* block = code.InstructionBlockAt(rpo);
-    CHECK(block->rpo_number() == rpo);
-
-    os << "B" << block->rpo_number();
-    os << ": AO#" << block->ao_number();
-    if (block->IsDeferred()) os << " (deferred)";
-    if (!block->needs_frame()) os << " (no frame)";
-    if (block->must_construct_frame()) os << " (construct frame)";
-    if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
-    if (block->IsLoopHeader()) {
-      os << " loop blocks: [" << block->rpo_number() << ", "
-         << block->loop_end() << ")";
-    }
-    os << "  instructions: [" << block->code_start() << ", "
-       << block->code_end() << ")\n  predecessors:";
-
-    for (auto pred : block->predecessors()) {
-      os << " B" << pred.ToInt();
-    }
-    os << "\n";
-
-    for (auto phi : block->phis()) {
-      PrintableInstructionOperand printable_op = {
-          printable.register_configuration_, phi->output()};
-      os << "     phi: " << printable_op << " =";
-      for (auto input : phi->operands()) {
-        os << " v" << input;
-      }
-      os << "\n";
-    }
-
-    ScopedVector<char> buf(32);
-    PrintableInstruction printable_instr;
-    printable_instr.register_configuration_ = printable.register_configuration_;
-    for (int j = block->first_instruction_index();
-         j <= block->last_instruction_index(); j++) {
-      // TODO(svenpanne) Add some basic formatting to our streams.
-      SNPrintF(buf, "%5d", j);
-      printable_instr.instr_ = code.InstructionAt(j);
-      os << "   " << buf.start() << ": " << printable_instr << "\n";
-    }
-
-    for (auto succ : block->successors()) {
-      os << " B" << succ.ToInt();
-    }
-    os << "\n";
+    printable.sequence_->PrintBlock(printable.register_configuration_, i);
   }
   return os;
 }
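The new InstructionSequence::Validate() above enforces edge-split form. A toy version of the same check (plain structs instead of InstructionBlock), to make the invariant concrete:

```cpp
#include <cassert>
#include <vector>

// In edge-split form, a block with more than one successor may only branch
// to blocks that have exactly one predecessor, i.e. every critical edge has
// been split by an intermediate block.
struct Block {
  std::vector<int> successors;
  std::vector<int> predecessors;
};

void ValidateEdgeSplitForm(const std::vector<Block>& blocks) {
  for (int id = 0; id < static_cast<int>(blocks.size()); ++id) {
    const Block& block = blocks[id];
    if (block.successors.size() <= 1) continue;
    for (int succ_id : block.successors) {
      const Block& succ = blocks[succ_id];
      assert(succ.predecessors.size() == 1 && succ.predecessors[0] == id);
    }
  }
}
```

Edge-split form matters to later phases because gap moves placed on a branch edge then have exactly one block they can live in.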
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 8a6a0ae..9c978ce 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -67,8 +67,10 @@
   inline bool IsAnyRegister() const;
   inline bool IsRegister() const;
   inline bool IsDoubleRegister() const;
+  inline bool IsSimd128Register() const;
   inline bool IsStackSlot() const;
   inline bool IsDoubleStackSlot() const;
+  inline bool IsSimd128StackSlot() const;
 
   template <typename SubKindOperand>
   static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
@@ -411,7 +413,7 @@
   }
 
   int index() const {
-    DCHECK(IsStackSlot() || IsDoubleStackSlot());
+    DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
     return static_cast<int64_t>(value_) >> IndexField::kShift;
   }
 
@@ -427,6 +429,12 @@
                                      IndexField::kShift);
   }
 
+  Simd128Register GetSimd128Register() const {
+    DCHECK(IsSimd128Register());
+    return Simd128Register::from_code(static_cast<int64_t>(value_) >>
+                                      IndexField::kShift);
+  }
+
   LocationKind location_kind() const {
     return LocationKindField::decode(value_);
   }
@@ -441,6 +449,7 @@
       case MachineRepresentation::kWord64:
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
+      case MachineRepresentation::kSimd128:
       case MachineRepresentation::kTagged:
         return true;
       case MachineRepresentation::kBit:
@@ -522,6 +531,12 @@
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsSimd128Register() const {
+  return IsAnyRegister() &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kSimd128;
+}
+
 bool InstructionOperand::IsStackSlot() const {
   return (IsAllocated() || IsExplicit()) &&
          LocationOperand::cast(this)->location_kind() ==
@@ -536,6 +551,14 @@
          IsFloatingPoint(LocationOperand::cast(this)->representation());
 }
 
+bool InstructionOperand::IsSimd128StackSlot() const {
+  return (IsAllocated() || IsExplicit()) &&
+         LocationOperand::cast(this)->location_kind() ==
+             LocationOperand::STACK_SLOT &&
+         LocationOperand::cast(this)->representation() ==
+             MachineRepresentation::kSimd128;
+}
+
 uint64_t InstructionOperand::GetCanonicalizedValue() const {
   if (IsAllocated() || IsExplicit()) {
     // TODO(dcarney): put machine type last and mask.
@@ -633,8 +656,14 @@
 
   MoveOperands* AddMove(const InstructionOperand& from,
                         const InstructionOperand& to) {
-    auto zone = get_allocator().zone();
-    auto move = new (zone) MoveOperands(from, to);
+    Zone* zone = get_allocator().zone();
+    return AddMove(from, to, zone);
+  }
+
+  MoveOperands* AddMove(const InstructionOperand& from,
+                        const InstructionOperand& to,
+                        Zone* operand_allocation_zone) {
+    MoveOperands* move = new (operand_allocation_zone) MoveOperands(from, to);
     push_back(move);
     return move;
   }
@@ -732,7 +761,6 @@
     return FlagsConditionField::decode(opcode());
   }
 
-  // TODO(titzer): make call into a flags.
   static Instruction* New(Zone* zone, InstructionCode opcode) {
     return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
   }
@@ -1323,6 +1351,11 @@
   void Print(const RegisterConfiguration* config) const;
   void Print() const;
 
+  void PrintBlock(const RegisterConfiguration* config, int block_id) const;
+  void PrintBlock(int block_id) const;
+
+  void Validate();
+
  private:
   friend std::ostream& operator<<(std::ostream& os,
                                   const PrintableInstructionSequence& code);
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
new file mode 100644
index 0000000..ff31abe
--- /dev/null
+++ b/src/compiler/int64-lowering.cc
@@ -0,0 +1,299 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/int64-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/node.h"
+#include "src/wasm/wasm-module.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+                             CommonOperatorBuilder* common, Zone* zone,
+                             Signature<MachineRepresentation>* signature)
+    : zone_(zone),
+      graph_(graph),
+      machine_(machine),
+      common_(common),
+      state_(graph, 4),
+      stack_(zone),
+      replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
+      signature_(signature) {
+  memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+}
+
+void Int64Lowering::LowerGraph() {
+  if (4 != kPointerSize) {
+    return;
+  }
+  stack_.push(graph()->end());
+  state_.Set(graph()->end(), State::kOnStack);
+
+  while (!stack_.empty()) {
+    Node* top = stack_.top();
+    if (state_.Get(top) == State::kInputsPushed) {
+      stack_.pop();
+      state_.Set(top, State::kVisited);
+      // All inputs of top have already been reduced, now reduce top.
+      LowerNode(top);
+    } else {
+      // Push all children onto the stack.
+      for (Node* input : top->inputs()) {
+        if (state_.Get(input) == State::kUnvisited) {
+          stack_.push(input);
+          state_.Set(input, State::kOnStack);
+        }
+      }
+      state_.Set(top, State::kInputsPushed);
+    }
+  }
+}
+
+static int GetParameterIndexAfterLowering(
+    Signature<MachineRepresentation>* signature, int old_index) {
+  int result = old_index;
+  for (int i = 0; i < old_index; i++) {
+    if (signature->GetParam(i) == MachineRepresentation::kWord64) {
+      result++;
+    }
+  }
+  return result;
+}
+
+static int GetParameterCountAfterLowering(
+    Signature<MachineRepresentation>* signature) {
+  return GetParameterIndexAfterLowering(
+      signature, static_cast<int>(signature->parameter_count()));
+}
+
+static int GetReturnCountAfterLowering(
+    Signature<MachineRepresentation>* signature) {
+  int result = static_cast<int>(signature->return_count());
+  for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+    if (signature->GetReturn(i) == MachineRepresentation::kWord64) {
+      result++;
+    }
+  }
+  return result;
+}
+
+void Int64Lowering::LowerNode(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt64Constant: {
+      int64_t value = OpParameter<int64_t>(node);
+      Node* low_node = graph()->NewNode(
+          common()->Int32Constant(static_cast<int32_t>(value & 0xFFFFFFFF)));
+      Node* high_node = graph()->NewNode(
+          common()->Int32Constant(static_cast<int32_t>(value >> 32)));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kLoad: {
+      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+      if (load_rep.representation() == MachineRepresentation::kWord64) {
+        Node* base = node->InputAt(0);
+        Node* index = node->InputAt(1);
+        Node* index_high =
+            graph()->NewNode(machine()->Int32Add(), index,
+                             graph()->NewNode(common()->Int32Constant(4)));
+
+        const Operator* load_op = machine()->Load(MachineType::Int32());
+        Node* high_node;
+        if (node->InputCount() > 2) {
+          Node* effect_high = node->InputAt(2);
+          Node* control_high = node->InputAt(3);
+          high_node = graph()->NewNode(load_op, base, index_high, effect_high,
+                                       control_high);
+          // Change the effect chain from old_node --> old_effect to
+          // old_node --> high_node --> old_effect.
+          node->ReplaceInput(2, high_node);
+        } else {
+          high_node = graph()->NewNode(load_op, base, index_high);
+        }
+        NodeProperties::ChangeOp(node, load_op);
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kStore: {
+      StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+      if (store_rep.representation() == MachineRepresentation::kWord64) {
+        // We change the original store node to store the low word, and create
+        // a new store node to store the high word. The effect and control edges
+        // are copied from the original store to the new store node, the effect
+        // edge of the original store is redirected to the new store.
+        WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+
+        Node* base = node->InputAt(0);
+        Node* index = node->InputAt(1);
+        Node* index_high =
+            graph()->NewNode(machine()->Int32Add(), index,
+                             graph()->NewNode(common()->Int32Constant(4)));
+
+        Node* value = node->InputAt(2);
+        DCHECK(HasReplacementLow(value));
+        DCHECK(HasReplacementHigh(value));
+
+        const Operator* store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kWord32, write_barrier_kind));
+
+        Node* high_node;
+        if (node->InputCount() > 3) {
+          Node* effect_high = node->InputAt(3);
+          Node* control_high = node->InputAt(4);
+          high_node = graph()->NewNode(store_op, base, index_high,
+                                       GetReplacementHigh(value), effect_high,
+                                       control_high);
+          node->ReplaceInput(3, high_node);
+
+        } else {
+          high_node = graph()->NewNode(store_op, base, index_high,
+                                       GetReplacementHigh(value));
+        }
+
+        node->ReplaceInput(2, GetReplacementLow(value));
+        NodeProperties::ChangeOp(node, store_op);
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kWord64And: {
+      DCHECK(node->InputCount() == 2);
+      Node* left = node->InputAt(0);
+      Node* right = node->InputAt(1);
+
+      Node* low_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+                           GetReplacementLow(right));
+      Node* high_node =
+          graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+                           GetReplacementHigh(right));
+      ReplaceNode(node, low_node, high_node);
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      DCHECK(node->InputCount() == 1);
+      Node* input = node->InputAt(0);
+      ReplaceNode(node, GetReplacementLow(input), nullptr);
+      node->NullAllInputs();
+      break;
+    }
+    case IrOpcode::kStart: {
+      int parameter_count = GetParameterCountAfterLowering(signature());
+      // Only exchange the node if the parameter count actually changed.
+      if (parameter_count != signature()->parameter_count()) {
+        int delta =
+            parameter_count - static_cast<int>(signature()->parameter_count());
+        int new_output_count = node->op()->ValueOutputCount() + delta;
+        NodeProperties::ChangeOp(node, common()->Start(new_output_count));
+      }
+      break;
+    }
+    case IrOpcode::kParameter: {
+      DCHECK(node->InputCount() == 1);
+      // Only exchange the node if the parameter count actually changed. We do
+      // not even have to do the default lowering because the start node,
+      // the only input of a parameter node, only changes if the parameter count
+      // changes.
+      if (GetParameterCountAfterLowering(signature()) !=
+          signature()->parameter_count()) {
+        int old_index = ParameterIndexOf(node->op());
+        int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+        NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+
+        Node* high_node = nullptr;
+        if (signature()->GetParam(old_index) ==
+            MachineRepresentation::kWord64) {
+          high_node = graph()->NewNode(common()->Parameter(new_index + 1),
+                                       graph()->start());
+        }
+        ReplaceNode(node, node, high_node);
+      }
+      break;
+    }
+    case IrOpcode::kReturn: {
+      DefaultLowering(node);
+      int new_return_count = GetReturnCountAfterLowering(signature());
+      if (signature()->return_count() != new_return_count) {
+        NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+      }
+      break;
+    }
+    case IrOpcode::kCall: {
+      CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+      if (DefaultLowering(node) ||
+          (descriptor->ReturnCount() == 1 &&
+           descriptor->GetReturnType(0) == MachineType::Int64())) {
+        // We have to adjust the call descriptor.
+        const Operator* op = common()->Call(
+            wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), descriptor));
+        NodeProperties::ChangeOp(node, op);
+      }
+      if (descriptor->ReturnCount() == 1 &&
+          descriptor->GetReturnType(0) == MachineType::Int64()) {
+        // We access the additional return values through projections.
+        Node* low_node = graph()->NewNode(common()->Projection(0), node);
+        Node* high_node = graph()->NewNode(common()->Projection(1), node);
+        ReplaceNode(node, low_node, high_node);
+      }
+      break;
+    }
+    default: { DefaultLowering(node); }
+  }
+}
+
+bool Int64Lowering::DefaultLowering(Node* node) {
+  bool something_changed = false;
+  for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+    Node* input = node->InputAt(i);
+    if (HasReplacementLow(input)) {
+      something_changed = true;
+      node->ReplaceInput(i, GetReplacementLow(input));
+    }
+    if (HasReplacementHigh(input)) {
+      something_changed = true;
+      node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
+    }
+  }
+  return something_changed;
+}
+
+void Int64Lowering::ReplaceNode(Node* old, Node* new_low, Node* new_high) {
+  // If new_low == nullptr, then new_high == nullptr as well.
+  DCHECK(new_low != nullptr || new_high == nullptr);
+  replacements_[old->id()].low = new_low;
+  replacements_[old->id()].high = new_high;
+}
+
+bool Int64Lowering::HasReplacementLow(Node* node) {
+  return replacements_[node->id()].low != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementLow(Node* node) {
+  Node* result = replacements_[node->id()].low;
+  DCHECK(result);
+  return result;
+}
+
+bool Int64Lowering::HasReplacementHigh(Node* node) {
+  return replacements_[node->id()].high != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementHigh(Node* node) {
+  Node* result = replacements_[node->id()].high;
+  DCHECK(result);
+  return result;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
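To make the LowerNode cases above concrete, here is a worked model of the lowering on a 32-bit target, using plain integers instead of IR nodes (a sketch, not the pass itself): every i64 value becomes a {low, high} pair of i32 words (the Replacement struct), and each 64-bit operator is rewritten on the pair.

```cpp
#include <cstdint>
#include <utility>

using Pair = std::pair<uint32_t, uint32_t>;  // {low word, high word}

// Mirrors the kInt64Constant case: split the constant into two i32 words.
Pair LowerInt64Constant(int64_t value) {
  return {static_cast<uint32_t>(value & 0xFFFFFFFF),
          static_cast<uint32_t>(value >> 32)};
}

// Mirrors kWord64And: bitwise ops act independently on each word.
Pair LowerWord64And(Pair a, Pair b) {
  return {a.first & b.first, a.second & b.second};
}

// Mirrors kTruncateInt64ToInt32: truncation forwards the low-word half.
uint32_t LowerTruncateInt64ToInt32(Pair a) {
  return a.first;
}
```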
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
new file mode 100644
index 0000000..79a95dc
--- /dev/null
+++ b/src/compiler/int64-lowering.h
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INT64_REDUCER_H_
+#define V8_COMPILER_INT64_REDUCER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Int64Lowering {
+ public:
+  Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+                CommonOperatorBuilder* common, Zone* zone,
+                Signature<MachineRepresentation>* signature);
+
+  void LowerGraph();
+
+ private:
+  enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+
+  struct Replacement {
+    Node* low;
+    Node* high;
+  };
+
+  Zone* zone() const { return zone_; }
+  Graph* graph() const { return graph_; }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  CommonOperatorBuilder* common() const { return common_; }
+  Signature<MachineRepresentation>* signature() const { return signature_; }
+
+  void LowerNode(Node* node);
+  bool DefaultLowering(Node* node);
+
+  void ReplaceNode(Node* old, Node* new_low, Node* new_high);
+  bool HasReplacementLow(Node* node);
+  Node* GetReplacementLow(Node* node);
+  bool HasReplacementHigh(Node* node);
+  Node* GetReplacementHigh(Node* node);
+
+  Zone* zone_;
+  Graph* const graph_;
+  MachineOperatorBuilder* machine_;
+  CommonOperatorBuilder* common_;
+  NodeMarker<State> state_;
+  ZoneStack<Node*> stack_;
+  Replacement* replacements_;
+  Signature<MachineRepresentation>* signature_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INT64_REDUCER_H_
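A standalone model of GetParameterIndexAfterLowering from the new int64-lowering.cc above: every kWord64 parameter before the queried index shifts later indices right by one, because it now occupies two i32 parameter slots. A self-contained sketch with its own simplified representation enum:

```cpp
#include <cassert>
#include <vector>

enum Rep { kWord32, kWord64 };

int IndexAfterLowering(const std::vector<Rep>& params, int old_index) {
  int result = old_index;
  for (int i = 0; i < old_index; i++) {
    if (params[i] == kWord64) result++;  // each earlier i64 adds one slot
  }
  return result;
}

int main() {
  std::vector<Rep> sig = {kWord32, kWord64, kWord32};
  assert(IndexAfterLowering(sig, 0) == 0);
  assert(IndexAfterLowering(sig, 1) == 1);  // the i64 keeps its own slot...
  assert(IndexAfterLowering(sig, 2) == 3);  // ...later parameters shift right
  return 0;
}
```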
diff --git a/src/compiler/interpreter-assembler.cc b/src/compiler/interpreter-assembler.cc
deleted file mode 100644
index 7080d02..0000000
--- a/src/compiler/interpreter-assembler.cc
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/interpreter-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
-                                           interpreter::Bytecode bytecode)
-    : bytecode_(bytecode),
-      raw_assembler_(new RawMachineAssembler(
-          isolate, new (zone) Graph(zone),
-          Linkage::GetInterpreterDispatchDescriptor(zone),
-          MachineType::PointerRepresentation(),
-          InstructionSelector::SupportedMachineOperatorFlags())),
-      accumulator_(
-          raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
-      bytecode_offset_(raw_assembler_->Parameter(
-          Linkage::kInterpreterBytecodeOffsetParameter)),
-      context_(
-          raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
-      code_generated_(false) {}
-
-
-InterpreterAssembler::~InterpreterAssembler() {}
-
-
-Handle<Code> InterpreterAssembler::GenerateCode() {
-  DCHECK(!code_generated_);
-
-  // Disallow empty handlers that never return.
-  DCHECK_NE(0, graph()->end()->InputCount());
-
-  const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
-  Schedule* schedule = raw_assembler_->Export();
-  Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
-      Code::STUB, bytecode_name);
-
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_trace_ignition_codegen) {
-    OFStream os(stdout);
-    code->Disassemble(bytecode_name, os);
-    os << std::flush;
-  }
-#endif
-
-  code_generated_ = true;
-  return code;
-}
-
-
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
-
-
-void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
-
-
-Node* InterpreterAssembler::GetContext() { return context_; }
-
-
-void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
-
-
-Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
-
-
-Node* InterpreterAssembler::RegisterFileRawPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
-}
-
-
-Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
-}
-
-
-Node* InterpreterAssembler::DispatchTableRawPointer() {
-  return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
-}
-
-
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
-  return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(),
-                              RegisterFileRawPointer(), Int32Constant(offset));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
-  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
-  return WordShl(index, kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
-  return raw_assembler_->Load(MachineType::AnyTagged(),
-                              RegisterFileRawPointer(),
-                              RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
-  return raw_assembler_->Store(MachineRepresentation::kTagged,
-                               RegisterFileRawPointer(), Int32Constant(offset),
-                               value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value,
-                                          interpreter::Register reg) {
-  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
-  return raw_assembler_->Store(
-      MachineRepresentation::kTagged, RegisterFileRawPointer(),
-      RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::NextRegister(Node* reg_index) {
-  // Register indices are negative, so the next register's index is one less.
-  return IntPtrAdd(reg_index, Int32Constant(-1));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kByte,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  return raw_assembler_->Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(),
-                Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                    bytecode_, operand_index))));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kByte,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  Node* load = raw_assembler_->Load(
-      MachineType::Int8(), BytecodeArrayTaggedPointer(),
-      IntPtrAdd(BytecodeOffset(),
-                Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                    bytecode_, operand_index))));
-  // Ensure that we sign-extend to full pointer size.
-  if (kPointerSize == 8) {
-    load = raw_assembler_->ChangeInt32ToInt64(load);
-  }
-  return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kShort,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  if (TargetSupportsUnalignedAccess()) {
-    return raw_assembler_->Load(
-        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(),
-                  Int32Constant(interpreter::Bytecodes::GetOperandOffset(
-                      bytecode_, operand_index))));
-  } else {
-    int offset =
-        interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
-    Node* first_byte = raw_assembler_->Load(
-        MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
-    Node* second_byte = raw_assembler_->Load(
-        MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
-#if V8_TARGET_LITTLE_ENDIAN
-    return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
-                                  first_byte);
-#elif V8_TARGET_BIG_ENDIAN
-    return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
-                                  second_byte);
-#else
-#error "Unknown Architecture"
-#endif
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
-    int operand_index) {
-  DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
-  DCHECK_EQ(interpreter::OperandSize::kShort,
-            interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-  int operand_offset =
-      interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
-  Node* load;
-  if (TargetSupportsUnalignedAccess()) {
-    load = raw_assembler_->Load(
-        MachineType::Int16(), BytecodeArrayTaggedPointer(),
-        IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
-  } else {
-#if V8_TARGET_LITTLE_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
-    Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
-    Node* hi_byte_offset = Int32Constant(operand_offset);
-    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
-    Node* hi_byte =
-        raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                             IntPtrAdd(BytecodeOffset(), hi_byte_offset));
-    Node* lo_byte =
-        raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
-                             IntPtrAdd(BytecodeOffset(), lo_byte_offset));
-    hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
-    load = raw_assembler_->Word32Or(hi_byte, lo_byte);
-  }
-
-  // Ensure that we sign-extend to full pointer size.
-  if (kPointerSize == 8) {
-    load = raw_assembler_->ChangeInt32ToInt64(load);
-  }
-  return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case interpreter::OperandSize::kByte:
-      DCHECK_EQ(
-          interpreter::OperandType::kCount8,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case interpreter::OperandSize::kShort:
-      DCHECK_EQ(
-          interpreter::OperandType::kCount16,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
-  DCHECK_EQ(interpreter::OperandType::kImm8,
-            interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-  return BytecodeOperandSignExtended(operand_index);
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
-    case interpreter::OperandSize::kByte:
-      DCHECK_EQ(
-          interpreter::OperandType::kIdx8,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperand(operand_index);
-    case interpreter::OperandSize::kShort:
-      DCHECK_EQ(
-          interpreter::OperandType::kIdx16,
-          interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
-      return BytecodeOperandShort(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-  switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
-    case interpreter::OperandType::kReg8:
-    case interpreter::OperandType::kRegPair8:
-    case interpreter::OperandType::kMaybeReg8:
-      DCHECK_EQ(
-          interpreter::OperandSize::kByte,
-          interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-      return BytecodeOperandSignExtended(operand_index);
-    case interpreter::OperandType::kReg16:
-      DCHECK_EQ(
-          interpreter::OperandSize::kShort,
-          interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
-      return BytecodeOperandShortSignExtended(operand_index);
-    default:
-      UNREACHABLE();
-      return nullptr;
-  }
-}
-
-
-Node* InterpreterAssembler::Int32Constant(int value) {
-  return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
-  return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* InterpreterAssembler::NumberConstant(double value) {
-  return raw_assembler_->NumberConstant(value);
-}
-
-
-Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
-  return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* InterpreterAssembler::BooleanConstant(bool value) {
-  return raw_assembler_->BooleanConstant(value);
-}
-
-
-Node* InterpreterAssembler::SmiShiftBitsConstant() {
-  return Int32Constant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* InterpreterAssembler::SmiTag(Node* value) {
-  return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::SmiUntag(Node* value) {
-  return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
-  return raw_assembler_->IntPtrAdd(a, b);
-}
-
-
-Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
-  return raw_assembler_->IntPtrSub(a, b);
-}
-
-
-Node* InterpreterAssembler::WordShl(Node* value, int shift) {
-  return raw_assembler_->WordShl(value, Int32Constant(shift));
-}
-
-
-Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
-  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
-                                        BytecodeArray::kConstantPoolOffset);
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(index, kPointerSizeLog2));
-  return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
-                              entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
-                                                  int index) {
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(Int32Constant(index), kPointerSizeLog2));
-  return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
-                              entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), object,
-                              IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
-  return raw_assembler_->Load(MachineType::AnyTagged(), context,
-                              IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
-  return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
-}
-
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
-                                             Node* value) {
-  Node* offset =
-      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
-                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
-  return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
-                               value, kFullWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::LoadTypeFeedbackVector() {
-  Node* function = raw_assembler_->Load(
-      MachineType::AnyTagged(), RegisterFileRawPointer(),
-      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  Node* shared_info =
-      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
-  Node* vector =
-      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
-  return vector;
-}
-
-
-Node* InterpreterAssembler::Projection(int index, Node* node) {
-  return raw_assembler_->Projection(index, node);
-}
-
-
-Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
-                                          Node* first_arg, Node* arg_count) {
-  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
-  Node* code_target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg_count;
-  args[1] = new_target;
-  args[2] = constructor;
-  args[3] = first_arg;
-  args[4] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-void InterpreterAssembler::CallPrologue() {
-  StoreRegister(SmiTag(bytecode_offset_),
-                InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-}
-
-
-void InterpreterAssembler::CallEpilogue() {
-  // Restore the bytecode offset from the stack frame.
-  bytecode_offset_ = SmiUntag(LoadRegister(
-      InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
-}
-
-
-Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
-                                  Node** args) {
-  CallPrologue();
-
-  Node* stack_pointer_before_call = nullptr;
-  if (FLAG_debug_code) {
-    stack_pointer_before_call = raw_assembler_->LoadStackPointer();
-  }
-  Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
-  if (FLAG_debug_code) {
-    Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
-    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
-                        kUnexpectedStackPointer);
-  }
-
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
-                                   Node* arg_count) {
-  Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
-  Node* code_target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg_count;
-  args[1] = first_arg;
-  args[2] = function;
-  args[3] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node** args) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
-  return CallN(call_descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3) {
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3, Node* arg4) {
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
-                                   Node* target, Node* arg1, Node* arg2,
-                                   Node* arg3, Node* arg4, Node* arg5) {
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = GetContext();
-  return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
-                                        Node* arg_count, int result_size) {
-  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
-  CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
-      Operator::kNoProperties, MachineType::AnyTagged(), result_size);
-  Node* code_target = HeapConstant(callable.code());
-
-  // Get the function entry from the function id.
-  Node* function_table = raw_assembler_->ExternalConstant(
-      ExternalReference::runtime_function_table_address(isolate()));
-  Node* function_offset = raw_assembler_->Int32Mul(
-      function_id, Int32Constant(sizeof(Runtime::Function)));
-  Node* function = IntPtrAdd(function_table, function_offset);
-  Node* function_entry =
-      raw_assembler_->Load(MachineType::Pointer(), function,
-                           Int32Constant(offsetof(Runtime::Function, entry)));
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg_count;
-  args[1] = first_arg;
-  args[2] = function_entry;
-  args[3] = GetContext();
-
-  return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1) {
-  CallPrologue();
-  Node* return_val =
-      raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1, Node* arg2) {
-  CallPrologue();
-  Node* return_val =
-      raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4) {
-  CallPrologue();
-  Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
-                                                  arg4, GetContext());
-  CallEpilogue();
-  return return_val;
-}
-
-
-void InterpreterAssembler::Return() {
-  Node* exit_trampoline_code_object =
-      HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
-  // If the order of the parameters changes, update the call signature below.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  Node* args[] = { GetAccumulator(),
-                   RegisterFileRawPointer(),
-                   BytecodeOffset(),
-                   BytecodeArrayTaggedPointer(),
-                   DispatchTableRawPointer(),
-                   GetContext() };
-  raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
-                            args);
-}
-
-
-Node* InterpreterAssembler::Advance(int delta) {
-  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
-}
-
-
-Node* InterpreterAssembler::Advance(Node* delta) {
-  return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
-}
-
-
-void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
-
-
-void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
-  RawMachineLabel match, no_match;
-  Node* condition = raw_assembler_->WordEqual(lhs, rhs);
-  raw_assembler_->Branch(condition, &match, &no_match);
-  raw_assembler_->Bind(&match);
-  DispatchTo(Advance(delta));
-  raw_assembler_->Bind(&no_match);
-  Dispatch();
-}
-
-
-void InterpreterAssembler::Dispatch() {
-  DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
-}
-
-
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
-  Node* target_bytecode = raw_assembler_->Load(
-      MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
-
-  // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
-  // from code object on every dispatch.
-  Node* target_code_object = raw_assembler_->Load(
-      MachineType::Pointer(), DispatchTableRawPointer(),
-      raw_assembler_->Word32Shl(target_bytecode,
-                                Int32Constant(kPointerSizeLog2)));
-
-  // If the order of the parameters changes, update the call signature below.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  Node* args[] = { GetAccumulator(),
-                   RegisterFileRawPointer(),
-                   new_bytecode_offset,
-                   BytecodeArrayTaggedPointer(),
-                   DispatchTableRawPointer(),
-                   GetContext() };
-  raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
-}
-
-
-void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
-  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
-  Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
-  // Unreached, but keeps TurboFan happy.
-  raw_assembler_->Return(ret_value);
-}
-
-
-void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
-                                               BailoutReason bailout_reason) {
-  RawMachineLabel match, no_match;
-  Node* condition = raw_assembler_->WordEqual(lhs, rhs);
-  raw_assembler_->Branch(condition, &match, &no_match);
-  raw_assembler_->Bind(&no_match);
-  Abort(bailout_reason);
-  raw_assembler_->Bind(&match);
-}
-
-
-// static
-bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-  return false;
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
-  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
-  return true;
-#else
-#error "Unknown Architecture"
-#endif
-}
-
-
-// RawMachineAssembler delegate helpers:
-Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
-
-
-Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
-
-
-CallDescriptor* InterpreterAssembler::call_descriptor() const {
-  return raw_assembler_->call_descriptor();
-}
-
-
-Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/interpreter-assembler.h b/src/compiler/interpreter-assembler.h
deleted file mode 100644
index fb79d3e..0000000
--- a/src/compiler/interpreter-assembler.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
-#include "src/frames.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-class CallInterfaceDescriptor;
-class Isolate;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class Schedule;
-
-class InterpreterAssembler {
- public:
-  InterpreterAssembler(Isolate* isolate, Zone* zone,
-                       interpreter::Bytecode bytecode);
-  virtual ~InterpreterAssembler();
-
-  Handle<Code> GenerateCode();
-
-  // Returns the count immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandCount(int operand_index);
-  // Returns the index immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandIdx(int operand_index);
-  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandImm(int operand_index);
-  // Returns the register index for bytecode operand |operand_index| in the
-  // current bytecode.
-  Node* BytecodeOperandReg(int operand_index);
-
-  // Accumulator.
-  Node* GetAccumulator();
-  void SetAccumulator(Node* value);
-
-  // Context.
-  Node* GetContext();
-  void SetContext(Node* value);
-
-  // Loads from and stores to the interpreter register file.
-  Node* LoadRegister(int offset);
-  Node* LoadRegister(interpreter::Register reg);
-  Node* LoadRegister(Node* reg_index);
-  Node* StoreRegister(Node* value, int offset);
-  Node* StoreRegister(Node* value, interpreter::Register reg);
-  Node* StoreRegister(Node* value, Node* reg_index);
-
-  // Returns the next consecutive register.
-  Node* NextRegister(Node* reg_index);
-
-  // Returns the location in memory of the register |reg_index| in the
-  // interpreter register file.
-  Node* RegisterLocation(Node* reg_index);
-
-  // Constants.
-  Node* Int32Constant(int value);
-  Node* IntPtrConstant(intptr_t value);
-  Node* NumberConstant(double value);
-  Node* HeapConstant(Handle<HeapObject> object);
-  Node* BooleanConstant(bool value);
-
-  // Tag and untag Smi values.
-  Node* SmiTag(Node* value);
-  Node* SmiUntag(Node* value);
-
-  // Basic arithmetic operations.
-  Node* IntPtrAdd(Node* a, Node* b);
-  Node* IntPtrSub(Node* a, Node* b);
-  Node* WordShl(Node* value, int shift);
-
-  // Load constant at |index| in the constant pool.
-  Node* LoadConstantPoolEntry(Node* index);
-
-  // Load an element from a fixed array on the heap.
-  Node* LoadFixedArrayElement(Node* fixed_array, int index);
-
-  // Load a field from an object on the heap.
-  Node* LoadObjectField(Node* object, int offset);
-
-  // Load |slot_index| from |context|.
-  Node* LoadContextSlot(Node* context, int slot_index);
-  Node* LoadContextSlot(Node* context, Node* slot_index);
-  // Stores |value| into |slot_index| of |context|.
-  Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
-
-  // Load the TypeFeedbackVector for the current function.
-  Node* LoadTypeFeedbackVector();
-
-  // Project the output value at index |index|
-  Node* Projection(int index, Node* node);
-
-  // Call constructor |constructor| with |arg_count| arguments (not
-  // including receiver) and the first argument located at
-  // |first_arg|. The |new_target| is the same as the
-  // |constructor| for the new keyword, but differs for the super
-  // keyword.
-  Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
-                      Node* arg_count);
-
-  // Call JSFunction or Callable |function| with |arg_count|
-  // arguments (not including receiver) and the first argument
-  // located at |first_arg|.
-  Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
-
-  // Call an IC code stub.
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3, Node* arg4);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
-               Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
-  // Call runtime function.
-  Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
-                    int return_size = 1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
-                    Node* arg3, Node* arg4);
-
-  // Jump relative to the current bytecode by |jump_offset|.
-  void Jump(Node* jump_offset);
-
-  // Jump relative to the current bytecode by |jump_offset| if the
-  // word values |lhs| and |rhs| are equal.
-  void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
-
-  // Returns from the function.
-  void Return();
-
-  // Dispatch to the bytecode.
-  void Dispatch();
-
-  // Abort with the given bailout reason.
-  void Abort(BailoutReason bailout_reason);
-
- protected:
-  static bool TargetSupportsUnalignedAccess();
-
-  // Protected helpers (for testing) which delegate to RawMachineAssembler.
-  CallDescriptor* call_descriptor() const;
-  Graph* graph();
-
- private:
-  // Returns a raw pointer to start of the register file on the stack.
-  Node* RegisterFileRawPointer();
-  // Returns a tagged pointer to the current function's BytecodeArray object.
-  Node* BytecodeArrayTaggedPointer();
-  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
-  Node* BytecodeOffset();
-  // Returns a raw pointer to first entry in the interpreter dispatch table.
-  Node* DispatchTableRawPointer();
-
-  // Saves and restores interpreter bytecode offset to the interpreter stack
-  // frame when performing a call.
-  void CallPrologue();
-  void CallEpilogue();
-
-  // Returns the offset of register |index| relative to
-  // RegisterFileRawPointer().
-  Node* RegisterFrameOffset(Node* index);
-
-  Node* SmiShiftBitsConstant();
-  Node* BytecodeOperand(int operand_index);
-  Node* BytecodeOperandSignExtended(int operand_index);
-  Node* BytecodeOperandShort(int operand_index);
-  Node* BytecodeOperandShortSignExtended(int operand_index);
-
-  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-  Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
-
-  // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
-  // update BytecodeOffset() itself.
-  Node* Advance(int delta);
-  Node* Advance(Node* delta);
-
-  // Starts next instruction dispatch at |new_bytecode_offset|.
-  void DispatchTo(Node* new_bytecode_offset);
-
-  // Abort operations for debug code.
-  void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
-
-  // Private helpers which delegate to RawMachineAssembler.
-  Isolate* isolate();
-  Zone* zone();
-
-  interpreter::Bytecode bytecode_;
-  base::SmartPointer<RawMachineAssembler> raw_assembler_;
-
-  Node* accumulator_;
-  Node* bytecode_offset_;
-  Node* context_;
-
-  bool code_generated_;
-
-  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
diff --git a/src/compiler/ir-operations.txt b/src/compiler/ir-operations.txt
deleted file mode 100644
index e69de29..0000000
--- a/src/compiler/ir-operations.txt
+++ /dev/null
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index a7a7da5..3023031 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -8,6 +8,7 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/objects-inl.h"
+#include "src/type-cache.h"
 #include "src/types.h"
 
 namespace v8 {
@@ -85,10 +86,10 @@
   Node* node_;
 };
 
-
 JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
-    : AdvancedReducer(editor), jsgraph_(jsgraph) {}
-
+    : AdvancedReducer(editor),
+      jsgraph_(jsgraph),
+      type_cache_(TypeCache::Get()) {}
 
 // ECMA-262, section 15.8.2.11.
 Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
@@ -141,6 +142,31 @@
   return NoChange();
 }
 
+// ES6 section 20.2.2.28 Math.round ( x )
+Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    // Math.round(a:integer \/ -0 \/ NaN) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchOne(Type::Number()) &&
+      machine()->Float64RoundUp().IsSupported()) {
+    // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
+    //                                Float64Sub(i, #1.0), i)
+    //   where i = Float64RoundUp(a)
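+    // That is, take the ceiling of {a} and subtract one whenever the ceiling
+    // overshoots {a} by more than 0.5; ties therefore round towards
+    // +infinity, as required for Math.round.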
+    Node* value = r.left();
+    Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
+    Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
+    return Replace(graph()->NewNode(
+        common()->Select(MachineRepresentation::kFloat64),
+        graph()->NewNode(machine()->Float64LessThan(),
+                         jsgraph()->Float64Constant(0.5), real),
+        graph()->NewNode(machine()->Float64Sub(), integer,
+                         jsgraph()->Float64Constant(1.0)),
+        integer));
+  }
+  return NoChange();
+}
 
 Reduction JSBuiltinReducer::Reduce(Node* node) {
   Reduction reduction = NoChange();
@@ -158,6 +184,9 @@
     case kMathFround:
       reduction = ReduceMathFround(node);
       break;
+    case kMathRound:
+      reduction = ReduceMathRound(node);
+      break;
     default:
       break;
   }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index cfacdc1..b64b335 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -9,6 +9,10 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
 namespace compiler {
 
 // Forward declarations.
@@ -30,6 +34,7 @@
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathRound(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -38,7 +43,8 @@
   MachineOperatorBuilder* machine() const;
   SimplifiedOperatorBuilder* simplified() const;
 
-  JSGraph* jsgraph_;
+  JSGraph* const jsgraph_;
+  TypeCache const& type_cache_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index a15d6fd..34217e7 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -129,8 +129,7 @@
     // Get to the actual frame state from which to extract the arguments;
     // we can only optimize this in case the {node} was already inlined into
     // some other function (and same for the {arg_array}).
-    CreateArgumentsParameters const& p =
-        CreateArgumentsParametersOf(arg_array->op());
+    CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
     Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
     Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
     if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
@@ -140,17 +139,22 @@
       frame_state = outer_state;
     }
     FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-    if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+    int start_index = 0;
+    if (type == CreateArgumentsType::kMappedArguments) {
       // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
       Handle<SharedFunctionInfo> shared;
       if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
       if (shared->internal_formal_parameter_count() != 0) return NoChange();
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      start_index = shared->internal_formal_parameter_count();
     }
     // Remove the argArray input from the {node}.
     node->RemoveInput(static_cast<int>(--arity));
     // Add the actual parameters to the {node}, skipping the receiver.
     Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-    for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+    for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
       node->InsertInput(graph()->zone(), static_cast<int>(arity),
                         parameters->InputAt(i));
       ++arity;
@@ -163,8 +167,7 @@
   }
   // Change {node} to the new {JSCallFunction} operator.
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.language_mode(),
-                                       CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
                                        convert_mode, p.tail_call_mode()));
   // Change context of {node} to the Function.prototype.apply context,
   // to ensure any exception is thrown in the correct context.
@@ -204,8 +207,7 @@
     --arity;
   }
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.language_mode(),
-                                       CallCountFeedback(p.feedback()),
+      node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
                                        convert_mode, p.tail_call_mode()));
   // Try to further reduce the JSCallFunction {node}.
   Reduction const reduction = ReduceJSCallFunction(node);
@@ -287,10 +289,9 @@
             jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
         arity++;
       }
-      NodeProperties::ChangeOp(
-          node, javascript()->CallFunction(arity, p.language_mode(),
-                                           CallCountFeedback(p.feedback()),
-                                           convert_mode, p.tail_call_mode()));
+      NodeProperties::ChangeOp(node, javascript()->CallFunction(
+                                         arity, CallCountFeedback(p.feedback()),
+                                         convert_mode, p.tail_call_mode()));
       // Try to further reduce the JSCallFunction {node}.
       Reduction const reduction = ReduceJSCallFunction(node);
       return reduction.Changed() ? reduction : Changed(node);
@@ -336,6 +337,7 @@
                          frame_state, effect, if_false);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
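+    // MergeControlToEnd changes the end node in place, so it has to be
+    // revisited by the reducer.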
+    Revisit(graph()->end());
     control = graph()->NewNode(common()->IfTrue(), branch);
 
     // Turn the {node} into a {JSCreateArray} call.
@@ -361,6 +363,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
 
       // Specialize the JSCallFunction node to the {target_function}.
@@ -404,8 +407,7 @@
         NodeProperties::RemoveFrameStateInput(node, 0);
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
-            node,
-            javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+            node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
         return Changed(node);
       }
 
@@ -479,6 +481,7 @@
                          frame_state, effect, if_false);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+    Revisit(graph()->end());
     control = graph()->NewNode(common()->IfTrue(), branch);
 
     // Turn the {node} into a {JSCreateArray} call.
@@ -510,6 +513,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
 
       // Specialize the JSCallConstruct node to the {target_function}.
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 9ffae15..f40f05d 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@
 
 // Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public Reducer {
+class JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
@@ -29,9 +29,12 @@
   };
   typedef base::Flags<Flag> Flags;
 
-  JSCallReducer(JSGraph* jsgraph, Flags flags,
+  JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
                 MaybeHandle<Context> native_context)
-      : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+      : AdvancedReducer(editor),
+        jsgraph_(jsgraph),
+        flags_(flags),
+        native_context_(native_context) {}
 
   Reduction Reduce(Node* node) final;
 
diff --git a/src/compiler/js-context-relaxation.cc b/src/compiler/js-context-relaxation.cc
deleted file mode 100644
index 0ca3c0c..0000000
--- a/src/compiler/js-context-relaxation.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/frame-states.h"
-#include "src/compiler/js-context-relaxation.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Reduction JSContextRelaxation::Reduce(Node* node) {
-  switch (node->opcode()) {
-    case IrOpcode::kJSCallFunction:
-    case IrOpcode::kJSToNumber: {
-      Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-      Node* outer_frame = frame_state;
-      Node* original_context = NodeProperties::GetContextInput(node);
-      Node* candidate_new_context = original_context;
-      do {
-        FrameStateInfo frame_state_info(
-            OpParameter<FrameStateInfo>(outer_frame->op()));
-        const FrameStateFunctionInfo* function_info =
-            frame_state_info.function_info();
-        if (function_info == nullptr ||
-            (function_info->context_calling_mode() ==
-             CALL_CHANGES_NATIVE_CONTEXT)) {
-          break;
-        }
-        candidate_new_context = outer_frame->InputAt(kFrameStateContextInput);
-        outer_frame = outer_frame->InputAt(kFrameStateOuterStateInput);
-      } while (outer_frame->opcode() == IrOpcode::kFrameState);
-
-      while (true) {
-        switch (candidate_new_context->opcode()) {
-          case IrOpcode::kParameter:
-          case IrOpcode::kJSCreateModuleContext:
-          case IrOpcode::kJSCreateScriptContext:
-            if (candidate_new_context != original_context) {
-              NodeProperties::ReplaceContextInput(node, candidate_new_context);
-              return Changed(node);
-            } else {
-              return NoChange();
-            }
-          case IrOpcode::kJSCreateCatchContext:
-          case IrOpcode::kJSCreateWithContext:
-          case IrOpcode::kJSCreateBlockContext:
-            candidate_new_context =
-                NodeProperties::GetContextInput(candidate_new_context);
-            break;
-          default:
-            return NoChange();
-        }
-      }
-    }
-    default:
-      break;
-  }
-  return NoChange();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/js-context-relaxation.h b/src/compiler/js-context-relaxation.h
deleted file mode 100644
index 4320e92..0000000
--- a/src/compiler/js-context-relaxation.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-#define V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Ensures that operations that only need to access the native context use the
-// outer-most context rather than the specific context given by the AST graph
-// builder. This makes it possible to use these operations with context
-// specialization (e.g. for generating stubs) without forcing inner contexts to
-// be embedded in generated code thus causing leaks and potentially using the
-// wrong native context (i.e. stubs are shared between native contexts).
-class JSContextRelaxation final : public Reducer {
- public:
-  JSContextRelaxation() {}
-  ~JSContextRelaxation() final {}
-
-  Reduction Reduce(Node* node) final;
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_JS_CONTEXT_RELAXATION_H_
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
new file mode 100644
index 0000000..df5c8d0
--- /dev/null
+++ b/src/compiler/js-create-lowering.cc
@@ -0,0 +1,1096 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-create-lowering.h"
+
+#include "src/allocation-site-scopes.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// A helper class to construct inline allocations on the simplified operator
+// level. This keeps track of the effect chain for initial stores on a newly
+// allocated object and also provides helpers for commonly allocated objects.
+class AllocationBuilder final {
+ public:
+  AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+      : jsgraph_(jsgraph),
+        allocation_(nullptr),
+        effect_(effect),
+        control_(control) {}
+
+  // Primitive allocation of static size.
+  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+    effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+    allocation_ =
+        graph()->NewNode(simplified()->Allocate(pretenure),
+                         jsgraph()->Constant(size), effect_, control_);
+    effect_ = allocation_;
+  }
+
+  // Primitive store into a field.
+  void Store(const FieldAccess& access, Node* value) {
+    effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
+                               value, effect_, control_);
+  }
+
+  // Primitive store into an element.
+  void Store(ElementAccess const& access, Node* index, Node* value) {
+    effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+                               index, value, effect_, control_);
+  }
+
+  // Compound allocation of a FixedArray.
+  void AllocateArray(int length, Handle<Map> map,
+                     PretenureFlag pretenure = NOT_TENURED) {
+    DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+           map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+    int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+                   ? FixedArray::SizeFor(length)
+                   : FixedDoubleArray::SizeFor(length);
+    Allocate(size, pretenure);
+    Store(AccessBuilder::ForMap(), map);
+    Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+  }
+
+  // Compound store of a constant into a field.
+  void Store(const FieldAccess& access, Handle<Object> value) {
+    Store(access, jsgraph()->Constant(value));
+  }
+
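+  // Completes the allocation region and turns {node} into the resulting
+  // FinishRegion node.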
+  void FinishAndChange(Node* node) {
+    NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+    node->ReplaceInput(0, allocation_);
+    node->ReplaceInput(1, effect_);
+    node->TrimInputCount(2);
+    NodeProperties::ChangeOp(node, common()->FinishRegion());
+  }
+
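+  // Completes the allocation region and returns a fresh FinishRegion node.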
+  Node* Finish() {
+    return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+  }
+
+ protected:
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ private:
+  JSGraph* const jsgraph_;
+  Node* allocation_;
+  Node* effect_;
+  Node* control_;
+};
+
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+  Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+  FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+  return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+             ? outer_state
+             : frame_state;
+}
+
+// Checks whether allocation using the given target and new.target can be
+// inlined.
+bool IsAllocationInlineable(Handle<JSFunction> target,
+                            Handle<JSFunction> new_target) {
+  return new_target->has_initial_map() &&
+         new_target->initial_map()->constructor_or_backpointer() == *target;
+}
+
+// When initializing arrays, we'll unroll the initialization loop if the
+// number of elements is known and does not exceed this limit.
+const int kElementLoopUnrollLimit = 16;
+
+// Limits up to which context allocations are inlined.
+const int kFunctionContextAllocationLimit = 16;
+const int kBlockContextAllocationLimit = 16;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
+                   int* max_properties) {
+  DCHECK_GE(max_depth, 0);
+  DCHECK_GE(*max_properties, 0);
+
+  // Make sure the boilerplate map is not deprecated.
+  if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+  // Check for too deep nesting.
+  if (max_depth == 0) return false;
+
+  // Check the elements.
+  Isolate* const isolate = boilerplate->GetIsolate();
+  Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+  if (elements->length() > 0 &&
+      elements->map() != isolate->heap()->fixed_cow_array_map()) {
+    if (boilerplate->HasFastSmiOrObjectElements()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+      int length = elements->length();
+      for (int i = 0; i < length; i++) {
+        if ((*max_properties)-- == 0) return false;
+        Handle<Object> value(fast_elements->get(i), isolate);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+            return false;
+          }
+        }
+      }
+    } else if (!boilerplate->HasFastDoubleElements()) {
+      return false;
+    }
+  }
+
+  // TODO(turbofan): Do we want to support out-of-object properties?
+  Handle<FixedArray> properties(boilerplate->properties(), isolate);
+  if (properties->length() > 0) return false;
+
+  // Check the in-object properties.
+  Handle<DescriptorArray> descriptors(
+      boilerplate->map()->instance_descriptors(), isolate);
+  int limit = boilerplate->map()->NumberOfOwnDescriptors();
+  for (int i = 0; i < limit; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+    if (details.type() != DATA) continue;
+    if ((*max_properties)-- == 0) return false;
+    FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+    if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+    Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = 8;
+
+}  // namespace
+
+Reduction JSCreateLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kJSCreate:
+      return ReduceJSCreate(node);
+    case IrOpcode::kJSCreateArguments:
+      return ReduceJSCreateArguments(node);
+    case IrOpcode::kJSCreateArray:
+      return ReduceJSCreateArray(node);
+    case IrOpcode::kJSCreateIterResultObject:
+      return ReduceJSCreateIterResultObject(node);
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+      return ReduceJSCreateLiteral(node);
+    case IrOpcode::kJSCreateFunctionContext:
+      return ReduceJSCreateFunctionContext(node);
+    case IrOpcode::kJSCreateWithContext:
+      return ReduceJSCreateWithContext(node);
+    case IrOpcode::kJSCreateCatchContext:
+      return ReduceJSCreateCatchContext(node);
+    case IrOpcode::kJSCreateBlockContext:
+      return ReduceJSCreateBlockContext(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+  Node* const target = NodeProperties::GetValueInput(node, 0);
+  Type* const target_type = NodeProperties::GetType(target);
+  Node* const new_target = NodeProperties::GetValueInput(node, 1);
+  Type* const new_target_type = NodeProperties::GetType(new_target);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  // Extract constructor and original constructor function.
+  if (target_type->IsConstant() &&
+      new_target_type->IsConstant() &&
+      new_target_type->AsConstant()->Value()->IsJSFunction()) {
+    Handle<JSFunction> constructor =
+        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+    Handle<JSFunction> original_constructor =
+        Handle<JSFunction>::cast(new_target_type->AsConstant()->Value());
+    DCHECK(constructor->IsConstructor());
+    DCHECK(original_constructor->IsConstructor());
+
+    // Check if we can inline the allocation.
+    if (IsAllocationInlineable(constructor, original_constructor)) {
+      // Force completion of inobject slack tracking before
+      // generating code to finalize the instance size.
+      original_constructor->CompleteInobjectSlackTrackingIfActive();
+
+      // Compute instance size from initial map of {original_constructor}.
+      Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+      int const instance_size = initial_map->instance_size();
+
+      // Add a dependency on the {initial_map} to make sure that this code is
+      // deoptimized whenever the {initial_map} of the {original_constructor}
+      // changes.
+      dependencies()->AssumeInitialMapCantChange(initial_map);
+
+      // Emit code to allocate the JSObject instance for the
+      // {original_constructor}.
+      AllocationBuilder a(jsgraph(), effect, graph()->start());
+      a.Allocate(instance_size);
+      a.Store(AccessBuilder::ForMap(), initial_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(),
+              jsgraph()->EmptyFixedArrayConstant());
+      a.Store(AccessBuilder::ForJSObjectElements(),
+              jsgraph()->EmptyFixedArrayConstant());
+      for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+        a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+                jsgraph()->UndefinedConstant());
+      }
+      a.FinishAndChange(node);
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
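+// Sketch of the reduction above (assuming an inlineable allocation): a call
+// like |new C()| conceptually becomes
+//
+//   obj = Allocate(initial_map->instance_size())
+//   obj.map = initial_map
+//   obj.properties = obj.elements = <empty fixed array>
+//   obj.<in-object slot i> = undefined  (for each in-object property)
+//
+// guarded by a code dependency that deoptimizes if {initial_map} changes.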
+
+Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
+  CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
+  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+  // Use stub calls for materializing mapped, unmapped and rest arguments
+  // objects, but only for non-inlined (i.e. outermost) frames.
+  if (outer_state->opcode() != IrOpcode::kFrameState) {
+    switch (type) {
+      case CreateArgumentsType::kMappedArguments: {
+        // TODO(mstarzinger): Duplicate parameters are not handled yet.
+        Handle<SharedFunctionInfo> shared_info;
+        if (!state_info.shared_info().ToHandle(&shared_info) ||
+            shared_info->has_duplicate_parameters()) {
+          return NoChange();
+        }
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+      case CreateArgumentsType::kUnmappedArguments: {
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+      case CreateArgumentsType::kRestParameter: {
+        // TODO(bmeurer): Actually we don't need a frame state here.
+        Callable callable = CodeFactory::FastNewRestParameter(isolate());
+        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 0,
+            CallDescriptor::kNeedsFrameState);
+        const Operator* new_op = common()->Call(desc);
+        Node* stub_code = jsgraph()->HeapConstant(callable.code());
+        node->InsertInput(graph()->zone(), 0, stub_code);
+        NodeProperties::ChangeOp(node, new_op);
+        return Changed(node);
+      }
+    }
+    UNREACHABLE();
+  } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+    // Use inline allocation for all mapped arguments objects within inlined
+    // (i.e. non-outermost) frames, independent of the object size.
+    if (type == CreateArgumentsType::kMappedArguments) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      Node* const callee = NodeProperties::GetValueInput(node, 0);
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // TODO(mstarzinger): Duplicate parameters are not handled yet.
+      if (shared->has_duplicate_parameters()) return NoChange();
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by arguments object.
+      bool has_aliased_arguments = false;
+      Node* const elements = AllocateAliasedArguments(
+          effect, control, args_state, context, shared, &has_aliased_arguments);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the arguments object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_arguments_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+                                    : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the arguments object.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
+      STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+      a.Allocate(JSSloppyArgumentsObject::kSize);
+      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+      a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    } else if (type == CreateArgumentsType::kUnmappedArguments) {
+      // Use inline allocation for all unmapped arguments objects within inlined
+      // (i.e. non-outermost) frames, independent of the object size.
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by arguments object.
+      Node* const elements = AllocateArguments(effect, control, args_state);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the arguments object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_arguments_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              Context::STRICT_ARGUMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the arguments object.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
+      STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+      a.Allocate(JSStrictArgumentsObject::kSize);
+      a.Store(AccessBuilder::ForMap(), load_arguments_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      int start_index = shared->internal_formal_parameter_count();
+      // Use inline allocation for all rest parameter arrays within inlined
+      // (i.e. non-outermost) frames, independent of the object size.
+      Node* const control = NodeProperties::GetControlInput(node);
+      Node* const context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      // Choose the correct frame state and frame state info depending on
+      // whether there conceptually is an arguments adaptor frame in the call
+      // chain.
+      Node* const args_state = GetArgumentsFrameState(frame_state);
+      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+      // Prepare element backing store to be used by the rest array.
+      Node* const elements =
+          AllocateRestArguments(effect, control, args_state, start_index);
+      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+      // Load the JSArray object map from the current native context.
+      Node* const load_native_context = effect = graph()->NewNode(
+          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+          context, context, effect);
+      Node* const load_jsarray_map = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForContextSlot(
+              Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+          load_native_context, effect, control);
+      // Actually allocate and initialize the jsarray.
+      AllocationBuilder a(jsgraph(), effect, control);
+      Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+      // Minus the receiver.
+      int argument_count = args_state_info.parameter_count() - 1;
+      int length = std::max(0, argument_count - start_index);
+      STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+      a.Allocate(JSArray::kSize);
+      a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+      a.Store(AccessBuilder::ForJSObjectElements(), elements);
+      a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+              jsgraph()->Constant(length));
+      RelaxControls(node);
+      a.FinishAndChange(node);
+      return Changed(node);
+    }
+  }
+
+  return NoChange();
+}
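+// Rough guide to the cases above (illustrative, not exhaustive): |arguments|
+// in a sloppy-mode function takes the mapped (aliased) path, |arguments| in
+// a strict-mode function takes the unmapped path, and a rest parameter such
+// as |function f(...args) {}| takes the rest-parameter path, which
+// materializes a JSArray rather than an arguments object.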
+
+Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
+                                           int capacity,
+                                           Handle<AllocationSite> site) {
+  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Extract transition and tenuring feedback from the {site} and add
+  // appropriate code dependencies on the {site} if deoptimization is
+  // enabled.
+  PretenureFlag pretenure = site->GetPretenureMode();
+  ElementsKind elements_kind = site->GetElementsKind();
+  DCHECK(IsFastElementsKind(elements_kind));
+  dependencies()->AssumeTenuringDecision(site);
+  dependencies()->AssumeTransitionStable(site);
+
+  // Retrieve the initial map for the array from the appropriate native context.
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  Node* js_array_map = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+      native_context, native_context, effect);
+
+  // Set up the elements and properties.
+  Node* elements;
+  if (capacity == 0) {
+    elements = jsgraph()->EmptyFixedArrayConstant();
+  } else {
+    elements = effect =
+        AllocateElements(effect, control, elements_kind, capacity, pretenure);
+  }
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+  // Perform the allocation of the actual JSArray object.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.Allocate(JSArray::kSize, pretenure);
+  a.Store(AccessBuilder::ForMap(), js_array_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+  a.Store(AccessBuilder::ForJSObjectElements(), elements);
+  a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+  CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+  Node* target = NodeProperties::GetValueInput(node, 0);
+  Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+  // TODO(bmeurer): Optimize the subclassing case.
+  if (target != new_target) return NoChange();
+
+  // Check if we have a feedback {site} on the {node}.
+  Handle<AllocationSite> site = p.site();
+  if (site.is_null()) return NoChange();
+
+  // Attempt to inline calls to the Array constructor for the relevant cases
+  // where either no arguments are provided, or exactly one unsigned number
+  // argument is given.
+  if (site->CanInlineCall()) {
+    if (p.arity() == 0) {
+      Node* length = jsgraph()->ZeroConstant();
+      int capacity = JSArray::kPreallocatedArrayElements;
+      return ReduceNewArray(node, length, capacity, site);
+    } else if (p.arity() == 1) {
+      Node* length = NodeProperties::GetValueInput(node, 2);
+      Type* length_type = NodeProperties::GetType(length);
+      if (length_type->Is(Type::SignedSmall()) &&
+          length_type->Min() >= 0 &&
+          length_type->Max() <= kElementLoopUnrollLimit) {
+        int capacity = static_cast<int>(length_type->Max());
+        return ReduceNewArray(node, length, capacity, site);
+      }
+    }
+  }
+
+  return NoChange();
+}
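+// Illustrative examples for the reduction above: |Array()| is lowered with
+// length 0 and the default preallocated capacity, while |Array(3)| is
+// lowered with length and capacity 3, provided the type feedback proves the
+// argument to be a small non-negative integer within
+// kElementLoopUnrollLimit.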
+
+Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* done = NodeProperties::GetValueInput(node, 1);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  // Load the JSIteratorResult map for the {context}.
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  Node* iterator_result_map = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+      native_context, native_context, effect);
+
+  // Emit code to allocate the JSIteratorResult instance.
+  AllocationBuilder a(jsgraph(), effect, graph()->start());
+  a.Allocate(JSIteratorResult::kSize);
+  a.Store(AccessBuilder::ForMap(), iterator_result_map);
+  a.Store(AccessBuilder::ForJSObjectProperties(),
+          jsgraph()->EmptyFixedArrayConstant());
+  a.Store(AccessBuilder::ForJSObjectElements(),
+          jsgraph()->EmptyFixedArrayConstant());
+  a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+  a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
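+// The allocation above is equivalent to building {value: value, done: done}
+// with the native context's iterator result map, i.e. the object shape that
+// a generator's |next()| returns.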
+
+Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
+         node->opcode() == IrOpcode::kJSCreateLiteralObject);
+  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  Handle<LiteralsArray> literals_array;
+  if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
+    Handle<Object> literal(literals_array->literal(p.index()), isolate());
+    if (literal->IsAllocationSite()) {
+      Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
+      Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
+                                   isolate());
+      int max_properties = kMaxFastLiteralProperties;
+      if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+        AllocationSiteUsageContext site_context(isolate(), site, false);
+        site_context.EnterNewScope();
+        Node* value = effect =
+            AllocateFastLiteral(effect, control, boilerplate, &site_context);
+        site_context.ExitScope(site, boilerplate);
+        ReplaceWithValue(node, value, effect, control);
+        return Replace(value);
+      }
+    }
+  }
+
+  return NoChange();
+}
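+// Sketch: for |var o = {x: 1};| whose AllocationSite boilerplate passes
+// IsFastLiteral(), the JSCreateLiteralObject node is replaced by an inline
+// deep copy of the boilerplate; otherwise the node is left for generic
+// lowering.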
+
+Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+  int slot_count = OpParameter<int>(node->op());
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+  // Use inline allocation for function contexts up to a size limit.
+  if (slot_count < kFunctionContextAllocationLimit) {
+    // JSCreateFunctionContext[slot_count < limit](fun)
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* extension = jsgraph()->TheHoleConstant();
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    AllocationBuilder a(jsgraph(), effect, control);
+    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+    int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+    a.AllocateArray(context_length, factory()->function_context_map());
+    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+            native_context);
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+    }
+    RelaxControls(node);
+    a.FinishAndChange(node);
+    return Changed(node);
+  }
+
+  return NoChange();
+}
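+// The resulting function context (sketch) is laid out as
+//
+//   [closure, previous, extension (the hole), native_context,
+//    slot_0, ..., slot_{slot_count-1}]
+//
+// with all user-visible slots initialized to undefined.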
+
+Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+  Node* object = NodeProperties::GetValueInput(node, 0);
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  AllocationBuilder a(jsgraph(), effect, control);
+  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+  a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+          native_context);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+  Handle<String> name = OpParameter<Handle<String>>(node);
+  Node* exception = NodeProperties::GetValueInput(node, 0);
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* native_context = effect = graph()->NewNode(
+      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+      context, context, effect);
+  AllocationBuilder a(jsgraph(), effect, control);
+  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+  a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+                  factory()->catch_context_map());
+  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+          native_context);
+  a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+          exception);
+  RelaxControls(node);
+  a.FinishAndChange(node);
+  return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
+  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+  int const context_length = scope_info->ContextLength();
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+  // Use inline allocation for block contexts up to a size limit.
+  if (context_length < kBlockContextAllocationLimit) {
+    // JSCreateBlockContext[scope[length < limit]](fun)
+    Node* effect = NodeProperties::GetEffectInput(node);
+    Node* control = NodeProperties::GetControlInput(node);
+    Node* context = NodeProperties::GetContextInput(node);
+    Node* extension = jsgraph()->Constant(scope_info);
+    Node* native_context = effect = graph()->NewNode(
+        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+        context, context, effect);
+    AllocationBuilder a(jsgraph(), effect, control);
+    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
+    a.AllocateArray(context_length, factory()->block_context_map());
+    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+            native_context);
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+    }
+    RelaxControls(node);
+    a.FinishAndChange(node);
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
+                                          Node* frame_state) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
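+  // Note: the pre-increment below skips the first recorded value, which is
+  // the receiver; {argument_count} above was already adjusted accordingly.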
+  auto parameters_it = ++parameters_access.begin();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  return a.Finish();
+}
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}, skipping the first {start_index} arguments. Serves as
+// backing store for rest parameters of JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
+                                              Node* frame_state,
+                                              int start_index) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  int num_elements = std::max(0, argument_count - start_index);
+  if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+  // Skip unused arguments.
+  for (int i = 0; i < start_index; i++) {
+    ++parameters_it;
+  }
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(num_elements, factory()->fixed_array_map());
+  for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  return a.Finish();
+}
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateAliasedArguments(
+    Node* effect, Node* control, Node* frame_state, Node* context,
+    Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
+  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+  // If there is no aliasing, the arguments object elements are not special in
+  // any way, so we can just return an unmapped backing store instead.
+  int parameter_count = shared->internal_formal_parameter_count();
+  if (parameter_count == 0) {
+    return AllocateArguments(effect, control, frame_state);
+  }
+
+  // Calculate number of argument values being aliased/mapped.
+  int mapped_count = Min(argument_count, parameter_count);
+  *has_aliased_arguments = true;
+
+  // Prepare an iterator over argument values recorded in the frame state.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  StateValuesAccess parameters_access(parameters);
+  auto parameters_it = ++parameters_access.begin();
+
+  // The unmapped argument values recorded in the frame state are stored yet
+  // another indirection away and then linked into the parameter map below,
+  // whereas mapped argument values are replaced with a hole instead.
+  AllocationBuilder aa(jsgraph(), effect, control);
+  aa.AllocateArray(argument_count, factory()->fixed_array_map());
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+  }
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+  }
+  Node* arguments = aa.Finish();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), arguments, control);
+  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+  a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+  a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+  for (int i = 0; i < mapped_count; ++i) {
+    int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+    a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+  }
+  return a.Finish();
+}
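+// Illustrative example: for |function f(a, b) { return arguments; }| called
+// as f(1, 2, 3), this builds a parameter map [context, <store>, idx, idx]
+// and an arguments store [hole, hole, 3]; the two holes defer the mapped
+// parameters a and b to their context slots, while the unmapped argument 3
+// is stored directly.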
+
+Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
+                                         ElementsKind elements_kind,
+                                         int capacity,
+                                         PretenureFlag pretenure) {
+  DCHECK_LE(1, capacity);
+  DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+  Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+                                 ? factory()->fixed_double_array_map()
+                                 : factory()->fixed_array_map();
+  ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+                             ? AccessBuilder::ForFixedDoubleArrayElement()
+                             : AccessBuilder::ForFixedArrayElement();
+  Node* value =
+      IsFastDoubleElementsKind(elements_kind)
+          ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+          : jsgraph()->TheHoleConstant();
+
+  // Actually allocate the backing store.
+  AllocationBuilder a(jsgraph(), effect, control);
+  a.AllocateArray(capacity, elements_map, pretenure);
+  for (int i = 0; i < capacity; ++i) {
+    Node* index = jsgraph()->Constant(i);
+    a.Store(access, index, value);
+  }
+  return a.Finish();
+}
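+// Note that for double element kinds the backing store above is pre-filled
+// with the hole NaN bit pattern (kHoleNanInt64) rather than the tagged hole,
+// matching the FixedDoubleArray representation.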
+
+Node* JSCreateLowering::AllocateFastLiteral(
+    Node* effect, Node* control, Handle<JSObject> boilerplate,
+    AllocationSiteUsageContext* site_context) {
+  Handle<AllocationSite> current_site(*site_context->current(), isolate());
+  dependencies()->AssumeTransitionStable(current_site);
+
+  PretenureFlag pretenure = NOT_TENURED;
+  if (FLAG_allocation_site_pretenuring) {
+    Handle<AllocationSite> top_site(*site_context->top(), isolate());
+    pretenure = top_site->GetPretenureMode();
+    if (current_site.is_identical_to(top_site)) {
+      // We install a dependency for pretenuring only on the outermost literal.
+      dependencies()->AssumeTenuringDecision(top_site);
+    }
+  }
+
+  // Set up the properties backing store.
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+  // Set up the elements backing store.
+  Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
+                                               pretenure, site_context);
+  if (elements->op()->EffectOutputCount() > 0) effect = elements;
+
+  // Compute the in-object properties to store first (might have effects).
+  Handle<Map> boilerplate_map(boilerplate->map(), isolate());
+  ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
+  inobject_fields.reserve(boilerplate_map->GetInObjectProperties());
+  int const boilerplate_nof = boilerplate_map->NumberOfOwnDescriptors();
+  for (int i = 0; i < boilerplate_nof; ++i) {
+    PropertyDetails const property_details =
+        boilerplate_map->instance_descriptors()->GetDetails(i);
+    if (property_details.type() != DATA) continue;
+    Handle<Name> property_name(
+        boilerplate_map->instance_descriptors()->GetKey(i), isolate());
+    FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
+    FieldAccess access = {kTaggedBase, index.offset(), property_name,
+                          Type::Tagged(), MachineType::AnyTagged()};
+    Node* value;
+    if (boilerplate->IsUnboxedDoubleField(index)) {
+      access.machine_type = MachineType::Float64();
+      access.type = Type::Number();
+      value = jsgraph()->Constant(boilerplate->RawFastDoublePropertyAt(index));
+    } else {
+      Handle<Object> boilerplate_value(boilerplate->RawFastPropertyAt(index),
+                                       isolate());
+      if (boilerplate_value->IsJSObject()) {
+        Handle<JSObject> boilerplate_object =
+            Handle<JSObject>::cast(boilerplate_value);
+        Handle<AllocationSite> current_site = site_context->EnterNewScope();
+        value = effect = AllocateFastLiteral(effect, control,
+                                             boilerplate_object, site_context);
+        site_context->ExitScope(current_site, boilerplate_object);
+      } else if (property_details.representation().IsDouble()) {
+        // Allocate a mutable HeapNumber box and store the value into it.
+        value = effect = AllocateMutableHeapNumber(
+            Handle<HeapNumber>::cast(boilerplate_value)->value(),
+            effect, control);
+      } else if (property_details.representation().IsSmi()) {
+        // Ensure that value is stored as smi.
+        value = boilerplate_value->IsUninitialized()
+                    ? jsgraph()->ZeroConstant()
+                    : jsgraph()->Constant(boilerplate_value);
+      } else {
+        value = jsgraph()->Constant(boilerplate_value);
+      }
+    }
+    inobject_fields.push_back(std::make_pair(access, value));
+  }
+
+  // Fill slack at the end of the boilerplate object with filler maps.
+  int const boilerplate_length = boilerplate_map->GetInObjectProperties();
+  for (int index = static_cast<int>(inobject_fields.size());
+       index < boilerplate_length; ++index) {
+    FieldAccess access =
+        AccessBuilder::ForJSObjectInObjectProperty(boilerplate_map, index);
+    Node* value = jsgraph()->HeapConstant(factory()->one_pointer_filler_map());
+    inobject_fields.push_back(std::make_pair(access, value));
+  }
+
+  // Actually allocate and initialize the object.
+  AllocationBuilder builder(jsgraph(), effect, control);
+  builder.Allocate(boilerplate_map->instance_size(), pretenure);
+  builder.Store(AccessBuilder::ForMap(), boilerplate_map);
+  builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
+  builder.Store(AccessBuilder::ForJSObjectElements(), elements);
+  if (boilerplate_map->IsJSArrayMap()) {
+    Handle<JSArray> boilerplate_array = Handle<JSArray>::cast(boilerplate);
+    builder.Store(
+        AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
+        handle(boilerplate_array->length(), isolate()));
+  }
+  for (auto const inobject_field : inobject_fields) {
+    builder.Store(inobject_field.first, inobject_field.second);
+  }
+  return builder.Finish();
+}
+
+Node* JSCreateLowering::AllocateFastLiteralElements(
+    Node* effect, Node* control, Handle<JSObject> boilerplate,
+    PretenureFlag pretenure, AllocationSiteUsageContext* site_context) {
+  Handle<FixedArrayBase> boilerplate_elements(boilerplate->elements(),
+                                              isolate());
+
+  // Empty or copy-on-write elements just store a constant.
+  if (boilerplate_elements->length() == 0 ||
+      boilerplate_elements->map() == isolate()->heap()->fixed_cow_array_map()) {
+    if (pretenure == TENURED &&
+        isolate()->heap()->InNewSpace(*boilerplate_elements)) {
+      // If we would like to pretenure a fixed cow array, we must ensure that
+      // the array is already in old space, otherwise we'll create too many
+      // old-to-new-space pointers (overflowing the store buffer).
+      boilerplate_elements = Handle<FixedArrayBase>(
+          isolate()->factory()->CopyAndTenureFixedCOWArray(
+              Handle<FixedArray>::cast(boilerplate_elements)));
+      boilerplate->set_elements(*boilerplate_elements);
+    }
+    return jsgraph()->HeapConstant(boilerplate_elements);
+  }
+
+  // Compute the elements to store first (might have effects).
+  int const elements_length = boilerplate_elements->length();
+  Handle<Map> elements_map(boilerplate_elements->map(), isolate());
+  ZoneVector<Node*> elements_values(elements_length, zone());
+  if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
+    Handle<FixedDoubleArray> elements =
+        Handle<FixedDoubleArray>::cast(boilerplate_elements);
+    for (int i = 0; i < elements_length; ++i) {
+      if (elements->is_the_hole(i)) {
+        // TODO(turbofan): We cannot currently safely pass the (signaling)
+        // hole NaN through C++ code, as the C++ compiler on Intel might use
+        // FPU instructions/registers for doubles and thereby make the NaN
+        // quiet. We should consider passing doubles in the compiler as raw
+        // int64 values to prevent this.
+        elements_values[i] = effect =
+            graph()->NewNode(simplified()->LoadElement(
+                                 AccessBuilder::ForFixedDoubleArrayElement()),
+                             jsgraph()->HeapConstant(elements),
+                             jsgraph()->Constant(i), effect, control);
+      } else {
+        elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
+      }
+    }
+  } else {
+    Handle<FixedArray> elements =
+        Handle<FixedArray>::cast(boilerplate_elements);
+    for (int i = 0; i < elements_length; ++i) {
+      if (elements->is_the_hole(i)) {
+        elements_values[i] = jsgraph()->TheHoleConstant();
+      } else {
+        Handle<Object> element_value(elements->get(i), isolate());
+        if (element_value->IsJSObject()) {
+          Handle<JSObject> boilerplate_object =
+              Handle<JSObject>::cast(element_value);
+          Handle<AllocationSite> current_site = site_context->EnterNewScope();
+          elements_values[i] = effect = AllocateFastLiteral(
+              effect, control, boilerplate_object, site_context);
+          site_context->ExitScope(current_site, boilerplate_object);
+        } else {
+          elements_values[i] = jsgraph()->Constant(element_value);
+        }
+      }
+    }
+  }
+
+  // Allocate the backing store array and store the elements.
+  AllocationBuilder builder(jsgraph(), effect, control);
+  builder.AllocateArray(elements_length, elements_map, pretenure);
+  ElementAccess const access =
+      (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
+          ? AccessBuilder::ForFixedDoubleArrayElement()
+          : AccessBuilder::ForFixedArrayElement();
+  for (int i = 0; i < elements_length; ++i) {
+    builder.Store(access, jsgraph()->Constant(i), elements_values[i]);
+  }
+  return builder.Finish();
+}
+
+Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
+                                                  Node* control) {
+  // TODO(turbofan): Support inline allocation of MutableHeapNumber
+  // (requires proper alignment on Allocate, and Begin/FinishRegion).
+  Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+      CallDescriptor::kNoFlags, Operator::kNoThrow);
+  Node* result = effect = graph()->NewNode(
+      common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+      jsgraph()->NoContextConstant(), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+      jsgraph()->Constant(value), effect, control);
+  return result;
+}
+
+MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+    Node* node) {
+  Node* const closure = NodeProperties::GetValueInput(node, 0);
+  switch (closure->opcode()) {
+    case IrOpcode::kHeapConstant: {
+      Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
+      return handle(Handle<JSFunction>::cast(object)->literals());
+    }
+    case IrOpcode::kParameter: {
+      int const index = ParameterIndexOf(closure->op());
+      // The closure is always the last parameter to a JavaScript function, and
+      // {Parameter} indices start at -1, so value outputs of {Start} look like
+      // this: closure, receiver, param0, ..., paramN, context.
+      if (index == -1) {
+        return literals_array_;
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return MaybeHandle<LiteralsArray>();
+}
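+// Sketch of the two cases above: a HeapConstant closure yields its own
+// LiteralsArray directly, while the Parameter at index -1 (the closure of
+// the function being compiled) yields the literals array this reducer was
+// constructed with, if any.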
+
+Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
+
+Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
+
+Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
+
+JSOperatorBuilder* JSCreateLowering::javascript() const {
+  return jsgraph()->javascript();
+}
+
+CommonOperatorBuilder* JSCreateLowering::common() const {
+  return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
+  return jsgraph()->simplified();
+}
+
+MachineOperatorBuilder* JSCreateLowering::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
new file mode 100644
index 0000000..d9d184b
--- /dev/null
+++ b/src/compiler/js-create-lowering.h
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CREATE_LOWERING_H_
+#define V8_COMPILER_JS_CREATE_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class AllocationSiteUsageContext;
+class CompilationDependencies;
+class Factory;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Lowers JSCreate-level operators to fast (inline) allocations.
+class JSCreateLowering final : public AdvancedReducer {
+ public:
+  JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
+                   JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+                   Zone* zone)
+      : AdvancedReducer(editor),
+        dependencies_(dependencies),
+        jsgraph_(jsgraph),
+        literals_array_(literals_array),
+        zone_(zone) {}
+  ~JSCreateLowering() final {}
+
+  Reduction Reduce(Node* node) final;
+
+ private:
+  Reduction ReduceJSCreate(Node* node);
+  Reduction ReduceJSCreateArguments(Node* node);
+  Reduction ReduceJSCreateArray(Node* node);
+  Reduction ReduceJSCreateIterResultObject(Node* node);
+  Reduction ReduceJSCreateLiteral(Node* node);
+  Reduction ReduceJSCreateFunctionContext(Node* node);
+  Reduction ReduceJSCreateWithContext(Node* node);
+  Reduction ReduceJSCreateCatchContext(Node* node);
+  Reduction ReduceJSCreateBlockContext(Node* node);
+  Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+                           Handle<AllocationSite> site);
+
+  Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+  Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+                              int start_index);
+  Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+                                 Node* context, Handle<SharedFunctionInfo>,
+                                 bool* has_aliased_arguments);
+  Node* AllocateElements(Node* effect, Node* control,
+                         ElementsKind elements_kind, int capacity,
+                         PretenureFlag pretenure);
+  Node* AllocateFastLiteral(Node* effect, Node* control,
+                            Handle<JSObject> boilerplate,
+                            AllocationSiteUsageContext* site_context);
+  Node* AllocateFastLiteralElements(Node* effect, Node* control,
+                                    Handle<JSObject> boilerplate,
+                                    PretenureFlag pretenure,
+                                    AllocationSiteUsageContext* site_context);
+  Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
+
+  // Infers the LiteralsArray to use for a given {node}.
+  MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+
+  Factory* factory() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Isolate* isolate() const;
+  JSOperatorBuilder* javascript() const;
+  CommonOperatorBuilder* common() const;
+  SimplifiedOperatorBuilder* simplified() const;
+  MachineOperatorBuilder* machine() const;
+  CompilationDependencies* dependencies() const { return dependencies_; }
+  Zone* zone() const { return zone_; }
+
+  CompilationDependencies* const dependencies_;
+  JSGraph* const jsgraph_;
+  MaybeHandle<LiteralsArray> const literals_array_;
+  Zone* const zone_;
+};
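+
+// Usage sketch (typical pipeline wiring assumed, not prescribed by this
+// header): the reducer is registered with a GraphReducer alongside the other
+// lowerings, e.g.
+//
+//   JSCreateLowering create_lowering(&graph_reducer, dependencies, jsgraph,
+//                                    literals_array, temp_zone);
+//   graph_reducer.AddReducer(&create_lowering);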
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_CREATE_LOWERING_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 15ce908..df2d908 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -62,16 +62,11 @@
   return Changed(node);
 }
 
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token)                                  \
-  void JSGenericLowering::Lower##Op(Node* node) {                             \
-    BinaryOperationParameters const& p =                                      \
-        BinaryOperationParametersOf(node->op());                              \
-    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);             \
-    ReplaceWithStubCall(node,                                                 \
-                        CodeFactory::BinaryOpIC(isolate(), token,             \
-                                                strength(p.language_mode())), \
-                        CallDescriptor::kPatchableCallSiteWithNop | flags);   \
+#define REPLACE_BINARY_OP_IC_CALL(Op, token)                                \
+  void JSGenericLowering::Lower##Op(Node* node) {                           \
+    CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);           \
+    ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token),    \
+                        CallDescriptor::kPatchableCallSiteWithNop | flags); \
   }
 REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
 REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
@@ -86,128 +81,22 @@
 REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
 #undef REPLACE_BINARY_OP_IC_CALL
 
-
-// These ops are not language mode dependent; we arbitrarily pass Strength::WEAK
-// here.
-#define REPLACE_COMPARE_IC_CALL(op, token)             \
-  void JSGenericLowering::Lower##op(Node* node) {      \
-    ReplaceWithCompareIC(node, token, Strength::WEAK); \
-  }
-REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
-REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
-REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
-REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
-#undef REPLACE_COMPARE_IC_CALL
-
-
-#define REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(op, token)        \
-  void JSGenericLowering::Lower##op(Node* node) {                    \
-    ReplaceWithCompareIC(node, token,                                \
-                         strength(OpParameter<LanguageMode>(node))); \
-  }
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThan, Token::LT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThan, Token::GT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThanOrEqual, Token::LTE)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
-#undef REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE
-
-
 #define REPLACE_RUNTIME_CALL(op, fun)             \
   void JSGenericLowering::Lower##op(Node* node) { \
     ReplaceWithRuntimeCall(node, fun);            \
   }
-REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSEqual, Runtime::kEqual)
+REPLACE_RUNTIME_CALL(JSNotEqual, Runtime::kNotEqual)
+REPLACE_RUNTIME_CALL(JSStrictEqual, Runtime::kStrictEqual)
+REPLACE_RUNTIME_CALL(JSStrictNotEqual, Runtime::kStrictNotEqual)
+REPLACE_RUNTIME_CALL(JSLessThan, Runtime::kLessThan)
+REPLACE_RUNTIME_CALL(JSGreaterThan, Runtime::kGreaterThan)
+REPLACE_RUNTIME_CALL(JSLessThanOrEqual, Runtime::kLessThanOrEqual)
+REPLACE_RUNTIME_CALL(JSGreaterThanOrEqual, Runtime::kGreaterThanOrEqual)
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
 REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
-#undef REPLACE_RUNTIME
-
-
-static CallDescriptor::Flags FlagsForNode(Node* node) {
-  CallDescriptor::Flags result = CallDescriptor::kNoFlags;
-  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
-    result |= CallDescriptor::kNeedsFrameState;
-  }
-  return result;
-}
-
-
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
-                                             Strength str) {
-  Callable callable = CodeFactory::CompareIC(isolate(), token, str);
-
-  // Create a new call node asking a CompareIC for help.
-  NodeVector inputs(zone());
-  inputs.reserve(node->InputCount() + 1);
-  inputs.push_back(jsgraph()->HeapConstant(callable.code()));
-  inputs.push_back(NodeProperties::GetValueInput(node, 0));
-  inputs.push_back(NodeProperties::GetValueInput(node, 1));
-  inputs.push_back(NodeProperties::GetContextInput(node));
-  // Some comparisons (StrictEqual) don't have an effect, control or frame
-  // state inputs, so handle those cases here.
-  if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
-    inputs.push_back(NodeProperties::GetFrameStateInput(node, 0));
-  }
-  Node* effect = (node->op()->EffectInputCount() > 0)
-                     ? NodeProperties::GetEffectInput(node)
-                     : graph()->start();
-  inputs.push_back(effect);
-  Node* control = (node->op()->ControlInputCount() > 0)
-                      ? NodeProperties::GetControlInput(node)
-                      : graph()->start();
-  inputs.push_back(control);
-  CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), callable.descriptor(), 0,
-      CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
-      Operator::kNoProperties, MachineType::IntPtr());
-  Node* compare =
-      graph()->NewNode(common()->Call(desc_compare),
-                       static_cast<int>(inputs.size()), &inputs.front());
-
-  // Decide how the return value from the above CompareIC can be converted into
-  // a JavaScript boolean oddball depending on the given token.
-  Node* false_value = jsgraph()->FalseConstant();
-  Node* true_value = jsgraph()->TrueConstant();
-  const Operator* op = nullptr;
-  switch (token) {
-    case Token::EQ:  // a == 0
-    case Token::EQ_STRICT:
-      op = machine()->WordEqual();
-      break;
-    case Token::NE:  // a != 0 becomes !(a == 0)
-    case Token::NE_STRICT:
-      op = machine()->WordEqual();
-      std::swap(true_value, false_value);
-      break;
-    case Token::LT:  // a < 0
-      op = machine()->IntLessThan();
-      break;
-    case Token::GT:  // a > 0 becomes !(a <= 0)
-      op = machine()->IntLessThanOrEqual();
-      std::swap(true_value, false_value);
-      break;
-    case Token::LTE:  // a <= 0
-      op = machine()->IntLessThanOrEqual();
-      break;
-    case Token::GTE:  // a >= 0 becomes !(a < 0)
-      op = machine()->IntLessThan();
-      std::swap(true_value, false_value);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  Node* booleanize = graph()->NewNode(op, compare, jsgraph()->ZeroConstant());
-
-  // Finally patch the original node to select a boolean.
-  NodeProperties::ReplaceUses(node, node, compare, compare, compare);
-  node->TrimInputCount(3);
-  node->ReplaceInput(0, booleanize);
-  node->ReplaceInput(1, true_value);
-  node->ReplaceInput(2, false_value);
-  NodeProperties::ChangeOp(node,
-                           common()->Select(MachineRepresentation::kTagged));
-}
-
+#undef REPLACE_RUNTIME_CALL
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
@@ -223,11 +112,12 @@
 void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
                                                Runtime::FunctionId f,
                                                int nargs_override) {
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   Operator::Properties properties = node->op()->properties();
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), f, nargs, properties, CallDescriptor::kNeedsFrameState);
+  CallDescriptor* desc =
+      Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties, flags);
   Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
   Node* arity = jsgraph()->Int32Constant(nargs);
   node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
@@ -267,7 +157,9 @@
 
 
 void JSGenericLowering::LowerJSToName(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kToName);
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Callable callable = CodeFactory::ToName(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
@@ -279,99 +171,187 @@
 
 
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 2);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
-  Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
-      isolate(), p.language_mode(), UNINITIALIZED);
+  Callable callable =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
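+  // I.e. (sketch): vector = closure->shared_info->feedback_vector, realized
+  // as two raw tagged-field loads threaded through the effect chain.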
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
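
A note on the repeated `FieldOffset - kHeapObjectTag` idiom above: V8 heap
pointers carry a low tag bit, and the lowering folds the untagging into the
load offset rather than untagging the pointer first. A standalone sketch of
the arithmetic (the struct and field names are hypothetical, not V8's real
object layout):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kHeapObjectTag = 1;  // low bit set on heap pointers

    struct FakeFunction {
      void* map;
      void* shared_info;
    };

    int main() {
      FakeFunction fn{nullptr, &fn};
      // A "tagged" pointer: the real address plus the tag bit.
      char* tagged = reinterpret_cast<char*>(&fn) + kHeapObjectTag;
      // The machine-level Load uses (field offset - kHeapObjectTag), so the
      // tag and the adjustment cancel and the field is addressed directly.
      void* loaded = *reinterpret_cast<void**>(
          tagged + offsetof(FakeFunction, shared_info) - kHeapObjectTag);
      printf("%d\n", loaded == &fn);  // prints 1
    }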
 
 
 void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
+      isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
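
The feedback slot index is passed as a Smi constant in the lowerings above.
A sketch of the Smi encoding behind SmiConstant, assuming a 32-bit
configuration (kSmiTagSize = 1, kSmiShiftSize = 0; on 64-bit V8 the shift is
wider):

    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = 0;

    // Small integers are stored shifted left past the tag bits, leaving the
    // low tag bit zero so they are distinguishable from heap pointers.
    int32_t SmiEncode(int32_t value) {
      return value << (kSmiTagSize + kSmiShiftSize);
    }

    int32_t SmiDecode(int32_t smi) {
      return smi >> (kSmiTagSize + kSmiShiftSize);
    }

    int main() {
      printf("%d %d\n", SmiEncode(21), SmiDecode(SmiEncode(21)));  // 42 21
    }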
 
 
 void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(
-      isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+      isolate(), p.typeof_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   // Load global object from the context.
-  Node* native_context =
+  Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
                        jsgraph()->IntPtrConstant(
                            Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
-                       effect, graph()->start());
-  Node* global = graph()->NewNode(
+                       effect, control);
+  Node* global = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), native_context,
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
-      effect, graph()->start());
+      effect, control);
   node->InsertInput(zone(), 0, global);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(3, vector);
+  node->ReplaceInput(6, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
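
The native-context and extension loads above index into the context with
precomputed slot offsets. A sketch of how a Context::SlotOffset-style helper
arrives at those constants (the header layout and pointer size here are
assumptions for a 64-bit build, not authoritative V8 values):

    #include <cstdio>

    constexpr int kPointerSize = 8;
    constexpr int kHeapObjectTag = 1;
    constexpr int kHeaderSize = 2 * kPointerSize;  // map + length words

    // Offset of slot {index}, pre-adjusted for the heap-object tag so the
    // result can feed a raw machine Load directly.
    int SlotOffset(int index) {
      return kHeaderSize + index * kPointerSize - kHeapObjectTag;
    }

    int main() { printf("%d\n", SlotOffset(4)); }  // 47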
 
 
 void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 3);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
   LanguageMode language_mode = p.language_mode();
   Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
       isolate(), language_mode, UNINITIALIZED);
-  DCHECK(p.feedback().index() != -1);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 2);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::StoreICInOptimizedCode(
       isolate(), p.language_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  DCHECK(p.feedback().index() != -1);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+  Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
   Callable callable = CodeFactory::StoreICInOptimizedCode(
       isolate(), p.language_mode(), UNINITIALIZED);
+  // Load the type feedback vector from the closure.
+  Node* shared_info = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), closure,
+      jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+                                kHeapObjectTag),
+      effect, control);
+  Node* vector = effect = graph()->NewNode(
+      machine()->Load(MachineType::AnyTagged()), shared_info,
+      jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+                                kHeapObjectTag),
+      effect, control);
   // Load global object from the context.
-  Node* native_context =
+  Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
                        jsgraph()->IntPtrConstant(
                            Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
-                       effect, graph()->start());
-  Node* global = graph()->NewNode(
+                       effect, control);
+  Node* global = effect = graph()->NewNode(
       machine()->Load(MachineType::AnyTagged()), native_context,
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
-      effect, graph()->start());
+      effect, control);
   node->InsertInput(zone(), 0, global);
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
-  DCHECK(p.feedback().index() != -1);
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
-  ReplaceWithStubCall(node, callable,
-                      CallDescriptor::kPatchableCallSite | flags);
+  node->ReplaceInput(4, vector);
+  node->ReplaceInput(7, effect);
+  ReplaceWithStubCall(node, callable, flags);
 }
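
All of the lowerings above thread the effect chain with the
`value = effect = graph()->NewNode(...)` idiom: each load both produces a
value and becomes the new effect-chain tail, so later loads are ordered
after it. A minimal compilable sketch (Node and NewLoad are illustrative
stand-ins, not the real TurboFan API):

    #include <cstdio>

    struct Node { int id; };

    Node* NewLoad(Node* /*base*/, int /*offset*/, Node* /*effect*/) {
      static int next_id = 0;
      return new Node{next_id++};
    }

    int main() {
      Node* closure = NewLoad(nullptr, 0, nullptr);
      Node* effect = NewLoad(nullptr, 0, nullptr);
      Node* shared_info = effect = NewLoad(closure, 8, effect);
      Node* vector = effect = NewLoad(shared_info, 16, effect);
      // {effect} now aliases {vector}: the last load is the effect-chain
      // tail that ReplaceInput wires into the lowered node's effect input.
      printf("%d %d\n", shared_info->id, vector == effect);  // "2 1"
    }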
 
 
@@ -433,38 +413,24 @@
 }
 
 
-void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
-  const DynamicAccess& access = DynamicAccessOf(node->op());
-  Runtime::FunctionId function_id =
-      (access.typeof_mode() == NOT_INSIDE_TYPEOF)
-          ? Runtime::kLoadLookupSlot
-          : Runtime::kLoadLookupSlotNoReferenceError;
-  Node* projection = graph()->NewNode(common()->Projection(0), node);
-  NodeProperties::ReplaceUses(node, projection, node, node, node);
-  node->RemoveInput(NodeProperties::FirstValueIndex(node));
-  node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
-  ReplaceWithRuntimeCall(node, function_id);
-  projection->ReplaceInput(0, node);
-}
-
-
 void JSGenericLowering::LowerJSCreate(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kNewObject);
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Callable callable = CodeFactory::FastNewObject(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSCreateArguments(Node* node) {
-  const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
-  switch (p.type()) {
-    case CreateArgumentsParameters::kMappedArguments:
+  CreateArgumentsType const type = CreateArgumentsTypeOf(node->op());
+  switch (type) {
+    case CreateArgumentsType::kMappedArguments:
       ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
       break;
-    case CreateArgumentsParameters::kUnmappedArguments:
-      ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
+    case CreateArgumentsType::kUnmappedArguments:
+      ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments);
       break;
-    case CreateArgumentsParameters::kRestArray:
-      node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
-      ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
+    case CreateArgumentsType::kRestParameter:
+      ReplaceWithRuntimeCall(node, Runtime::kNewRestParameter);
       break;
   }
 }
@@ -473,7 +439,8 @@
 void JSGenericLowering::LowerJSCreateArray(Node* node) {
   CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
   int const arity = static_cast<int>(p.arity());
-  Node* new_target = node->InputAt(1);
+  Handle<AllocationSite> const site = p.site();
+
   // TODO(turbofan): We embed the AllocationSite from the Operator at this
+  // point, which we should not do once we want to both consume the feedback
+  // and at the same time share the optimized code across native contexts,
@@ -481,21 +448,93 @@
   // stored in the type feedback vector after all). Once we go for cross
   // context code generation, we should somehow find a way to get to the
   // allocation site for the actual native context at runtime.
-  Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
-                                       : jsgraph()->HeapConstant(p.site());
-  node->RemoveInput(1);
-  node->InsertInput(zone(), 1 + arity, new_target);
-  node->InsertInput(zone(), 2 + arity, type_info);
-  ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+  if (!site.is_null()) {
+    // Reduce {node} to the appropriate ArrayConstructorStub backend.
+    // Note that these stubs "behave" like JSFunctions, which means they
+    // expect a receiver on the stack, which they remove. We just push
+    // undefined for the receiver.
+    ElementsKind elements_kind = site->GetElementsKind();
+    AllocationSiteOverrideMode override_mode =
+        (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+            ? DISABLE_ALLOCATION_SITES
+            : DONT_OVERRIDE;
+    if (arity == 0) {
+      ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+                                          override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+          CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    } else if (arity == 1) {
+      // TODO(bmeurer): Optimize for the 0 length non-holey case?
+      ArraySingleArgumentConstructorStub stub(
+          isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+          CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    } else {
+      ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+                                          override_mode);
+      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+          isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+          arity + 1, CallDescriptor::kNeedsFrameState);
+      node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+      node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+      node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+      node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+      NodeProperties::ChangeOp(node, common()->Call(desc));
+    }
+  } else {
+    Node* new_target = node->InputAt(1);
+    Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+                                     : jsgraph()->HeapConstant(site);
+    node->RemoveInput(1);
+    node->InsertInput(zone(), 1 + arity, new_target);
+    node->InsertInput(zone(), 2 + arity, type_info);
+    ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+  }
 }
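
In short, with a known AllocationSite the lowering selects one specialized
constructor stub per arity class and decides whether that stub may skip
allocation-site tracking. A sketch of just the dispatch (an illustration of
the decision, not the stub-selection API):

    #include <cstdio>

    enum class OverrideMode { kDontOverride, kDisableAllocationSites };

    OverrideMode SelectOverrideMode(bool kind_tracks_allocation_site) {
      return kind_tracks_allocation_site
                 ? OverrideMode::kDisableAllocationSites
                 : OverrideMode::kDontOverride;
    }

    const char* SelectArrayStub(int arity) {
      if (arity == 0) return "ArrayNoArgumentConstructorStub";
      if (arity == 1) return "ArraySingleArgumentConstructorStub";
      return "ArrayNArgumentsConstructorStub";
    }

    int main() {
      printf("%s\n", SelectArrayStub(1));
    }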
 
 
 void JSGenericLowering::LowerJSCreateClosure(Node* node) {
-  CreateClosureParameters p = CreateClosureParametersOf(node->op());
-  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
-  ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
-                                   ? Runtime::kNewClosure_Tenured
-                                   : Runtime::kNewClosure);
+  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  Handle<SharedFunctionInfo> const shared_info = p.shared_info();
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+
+  // Use the FastNewClosureStub, which allocates in new space, only for
+  // nested functions that don't need cloning of literals.
+  if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
+    Callable callable = CodeFactory::FastNewClosure(
+        isolate(), shared_info->language_mode(), shared_info->kind());
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
+                                     ? Runtime::kNewClosure_Tenured
+                                     : Runtime::kNewClosure);
+  }
+}
+
+
+void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
+  int const slot_count = OpParameter<int>(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+
+  // Use the FastNewContextStub only for function contexts up to the maximum size.
+  if (slot_count <= FastNewContextStub::kMaximumSlots) {
+    Callable callable = CodeFactory::FastNewContext(isolate(), slot_count);
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
+  }
 }
 
 
@@ -506,19 +545,42 @@
 
 void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
-  node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
-  ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+
+  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
+  // initial length limit for arrays with "fast" elements kind.
+  if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
+      (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
+      length < JSArray::kInitialMaxFastElementArray) {
+    Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
+    ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+  }
 }
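
The fast path above is guarded by a flags-and-length predicate. A sketch of
its shape (the flag bit values and the length limit are assumptions; only
the structure of the check mirrors the code):

    #include <cstdio>

    constexpr int kShallowElements = 1 << 0;  // ArrayLiteral::kShallowElements
    constexpr int kIsStrong = 1 << 1;         // ArrayLiteral::kIsStrong
    constexpr int kInitialMaxFastElementArray = 100 * 1024;

    bool CanUseFastCloneShallowArray(int flags, int length) {
      return (flags & kShallowElements) != 0 && (flags & kIsStrong) == 0 &&
             length < kInitialMaxFastElementArray;
    }

    int main() {
      printf("%d\n", CanUseFastCloneShallowArray(kShallowElements, 16));  // 1
    }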
 
 
 void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
   CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+  CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+  int const length = Handle<FixedArray>::cast(p.constant())->length();
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
-  ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+
+  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
+  // elements, up to the number of properties that the stub can handle.
+  if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
+      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+    ReplaceWithStubCall(node, callable, flags);
+  } else {
+    ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+  }
 }
 
 
@@ -614,173 +676,7 @@
 
 
 void JSGenericLowering::LowerJSForInPrepare(Node* node) {
-  Node* object = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-
-  // Get the set of properties to enumerate.
-  Runtime::Function const* function =
-      Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
-  CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function->function_id, 1, Operator::kNoProperties,
-      CallDescriptor::kNeedsFrameState);
-  Node* cache_type = effect = graph()->NewNode(
-      common()->Call(descriptor),
-      jsgraph()->CEntryStubConstant(function->result_size), object,
-      jsgraph()->ExternalConstant(function->function_id),
-      jsgraph()->Int32Constant(1), context, frame_state, effect, control);
-  control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
-  Node* object_map = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), object,
-      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-      effect, control);
-  Node* cache_type_map = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), cache_type,
-      jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-      effect, control);
-  Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
-
-  // If we got a map from the GetPropertyNamesFast runtime call, we can do a
-  // fast modification check. Otherwise, we got a fixed array, and we have to
-  // perform a slow check on every iteration.
-  Node* check0 =
-      graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* cache_array_true0;
-  Node* cache_length_true0;
-  Node* cache_type_true0;
-  Node* etrue0;
-  {
-    // Enum cache case.
-    Node* cache_type_enum_length = etrue0 = graph()->NewNode(
-        machine()->Load(MachineType::Uint32()), cache_type,
-        jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
-        effect, if_true0);
-    cache_type_enum_length =
-        graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
-                         jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
-
-    Node* check1 =
-        graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
-                         jsgraph()->Int32Constant(0));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* cache_array_true1;
-    Node* etrue1;
-    {
-      // No properties to enumerate.
-      cache_array_true1 =
-          jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
-      etrue1 = etrue0;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* cache_array_false1;
-    Node* efalse1;
-    {
-      // Load the enumeration cache from the instance descriptors of {object}.
-      Node* object_map_descriptors = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map,
-          jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
-          etrue0, if_false1);
-      Node* object_map_enum_cache = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
-          jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
-                                    kHeapObjectTag),
-          efalse1, if_false1);
-      cache_array_false1 = efalse1 = graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
-          jsgraph()->IntPtrConstant(
-              DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
-          efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    cache_array_true0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                         cache_array_true1, cache_array_false1, if_true0);
-
-    cache_length_true0 = graph()->NewNode(
-        machine()->WordShl(),
-        machine()->Is64()
-            ? graph()->NewNode(machine()->ChangeUint32ToUint64(),
-                               cache_type_enum_length)
-            : cache_type_enum_length,
-        jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
-    cache_type_true0 = cache_type;
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* cache_array_false0;
-  Node* cache_length_false0;
-  Node* cache_type_false0;
-  Node* efalse0;
-  {
-    // FixedArray case.
-    cache_type_false0 = jsgraph()->OneConstant();  // Smi means slow check
-    cache_array_false0 = cache_type;
-    cache_length_false0 = efalse0 = graph()->NewNode(
-        machine()->Load(MachineType::AnyTagged()), cache_array_false0,
-        jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
-        effect, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* cache_array =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_array_true0, cache_array_false0, control);
-  Node* cache_length =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_length_true0, cache_length_false0, control);
-  cache_type =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_type_true0, cache_type_false0, control);
-
-  for (auto edge : node->use_edges()) {
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(effect);
-    } else if (NodeProperties::IsControlEdge(edge)) {
-      Node* const use = edge.from();
-      if (use->opcode() == IrOpcode::kIfSuccess) {
-        use->ReplaceUses(control);
-        use->Kill();
-      } else if (use->opcode() == IrOpcode::kIfException) {
-        edge.UpdateTo(cache_type_true0);
-      } else {
-        UNREACHABLE();
-      }
-    } else {
-      Node* const use = edge.from();
-      DCHECK(NodeProperties::IsValueEdge(edge));
-      DCHECK_EQ(IrOpcode::kProjection, use->opcode());
-      switch (ProjectionIndexOf(use->op())) {
-        case 0:
-          use->ReplaceUses(cache_type);
-          break;
-        case 1:
-          use->ReplaceUses(cache_array);
-          break;
-        case 2:
-          use->ReplaceUses(cache_length);
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      use->Kill();
-    }
-  }
+  ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
 }
 
 
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index ffce912..5ee759b 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -36,7 +36,6 @@
 #undef DECLARE_LOWER
 
   // Helpers to replace existing nodes with a generic call.
-  void ReplaceWithCompareIC(Node* node, Token::Value token, Strength strength);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index e6f01b3..132dec6 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -27,11 +27,10 @@
 
 
 JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
-    Editor* editor, JSGraph* jsgraph, Flags flags,
+    Editor* editor, JSGraph* jsgraph,
     MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
-      flags_(flags),
       native_context_(native_context),
       dependencies_(dependencies),
       type_cache_(TypeCache::Get()) {}
@@ -49,7 +48,6 @@
   return NoChange();
 }
 
-
 Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
   Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
@@ -88,47 +86,36 @@
     return Replace(value);
   }
 
-  // Load from non-configurable, data property on the global can be lowered to
-  // a field load, even without deoptimization, because the property cannot be
-  // deleted or reconfigured to an accessor/interceptor property.  Yet, if
-  // deoptimization support is available, we can constant-fold certain global
-  // properties or at least lower them to field loads annotated with more
-  // precise type feedback.
+  // Record a code dependency on the cell if we can benefit from the
+  // additional feedback, or if the global property is configurable (i.e.
+  // can be deleted or reconfigured to an accessor property).
+  if (property_details.cell_type() != PropertyCellType::kMutable ||
+      property_details.IsConfigurable()) {
+    dependencies()->AssumePropertyCell(property_cell);
+  }
+
+  // Load from constant/undefined global property can be constant-folded.
+  if (property_details.cell_type() == PropertyCellType::kConstant ||
+      property_details.cell_type() == PropertyCellType::kUndefined) {
+    Node* value = jsgraph()->Constant(property_cell_value);
+    ReplaceWithValue(node, value);
+    return Replace(value);
+  }
+
+  // Load from constant type cell can benefit from type feedback.
   Type* property_cell_value_type = Type::Tagged();
-  if (flags() & kDeoptimizationEnabled) {
-    // Record a code dependency on the cell if we can benefit from the
-    // additional feedback, or the global property is configurable (i.e.
-    // can be deleted or reconfigured to an accessor property).
-    if (property_details.cell_type() != PropertyCellType::kMutable ||
-        property_details.IsConfigurable()) {
-      dependencies()->AssumePropertyCell(property_cell);
+  if (property_details.cell_type() == PropertyCellType::kConstantType) {
+    // Compute proper type based on the current value in the cell.
+    if (property_cell_value->IsSmi()) {
+      property_cell_value_type = type_cache_.kSmi;
+    } else if (property_cell_value->IsNumber()) {
+      property_cell_value_type = type_cache_.kHeapNumber;
+    } else {
+      Handle<Map> property_cell_value_map(
+          Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+      property_cell_value_type =
+          Type::Class(property_cell_value_map, graph()->zone());
     }
-
-    // Load from constant/undefined global property can be constant-folded.
-    if ((property_details.cell_type() == PropertyCellType::kConstant ||
-         property_details.cell_type() == PropertyCellType::kUndefined)) {
-      Node* value = jsgraph()->Constant(property_cell_value);
-      ReplaceWithValue(node, value);
-      return Replace(value);
-    }
-
-    // Load from constant type cell can benefit from type feedback.
-    if (property_details.cell_type() == PropertyCellType::kConstantType) {
-      // Compute proper type based on the current value in the cell.
-      if (property_cell_value->IsSmi()) {
-        property_cell_value_type = type_cache_.kSmi;
-      } else if (property_cell_value->IsNumber()) {
-        property_cell_value_type = type_cache_.kHeapNumber;
-      } else {
-        Handle<Map> property_cell_value_map(
-            Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-        property_cell_value_type =
-            Type::Class(property_cell_value_map, graph()->zone());
-      }
-    }
-  } else if (property_details.IsConfigurable()) {
-    // Access to configurable global properties requires deoptimization support.
-    return NoChange();
   }
   Node* value = effect = graph()->NewNode(
       simplified()->LoadField(
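
The load path above now unconditionally records the cell dependency and
dispatches on the property cell's type. A standalone sketch of the fold
decision (the enum mirrors the code; the semantics are paraphrased):

    #include <cstdio>

    enum class PropertyCellType {
      kMutable, kConstant, kConstantType, kUndefined
    };

    // Constant and undefined cells can be folded to their current value;
    // the recorded code dependency discards the optimized code if the cell
    // is ever written with a different value.
    bool CanConstantFoldLoad(PropertyCellType type) {
      return type == PropertyCellType::kConstant ||
             type == PropertyCellType::kUndefined;
    }

    int main() {
      printf("%d\n", CanConstantFoldLoad(PropertyCellType::kConstant));  // 1
    }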
@@ -178,9 +165,8 @@
       return NoChange();
     }
     case PropertyCellType::kConstant: {
-      // Store to constant property cell requires deoptimization support,
-      // because we might even need to eager deoptimize for mismatch.
-      if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+      // Record a code dependency on the cell, and just deoptimize if the new
+      // value doesn't match the previous value stored inside the cell.
       dependencies()->AssumePropertyCell(property_cell);
       Node* check =
           graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
@@ -193,13 +179,13 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
       break;
     }
     case PropertyCellType::kConstantType: {
-      // Store to constant-type property cell requires deoptimization support,
-      // because we might even need to eager deoptimize for mismatch.
-      if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+      // Record a code dependency on the cell, and just deoptimize if the new
+      // value's type doesn't match the type of the previous value in the cell.
       dependencies()->AssumePropertyCell(property_cell);
       Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
       Type* property_cell_value_type = Type::TaggedSigned();
@@ -213,6 +199,7 @@
                              frame_state, effect, if_true);
         // TODO(bmeurer): This should be on the AdvancedReducer somehow.
         NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+        Revisit(graph()->end());
         control = graph()->NewNode(common()->IfFalse(), branch);
 
         // Load the {value} map check against the {property_cell} map.
@@ -234,6 +221,7 @@
                            frame_state, effect, if_false);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+      Revisit(graph()->end());
       control = graph()->NewNode(common()->IfTrue(), branch);
       effect = graph()->NewNode(
           simplified()->StoreField(
@@ -243,13 +231,11 @@
     }
     case PropertyCellType::kMutable: {
       // A store to a non-configurable data property on the global can be lowered
-      // to a field store, even without deoptimization, because the property
-      // cannot be deleted or reconfigured to an accessor/interceptor property.
+      // to a field store, even without recording a code dependency on the cell,
+      // because the property cannot be deleted or reconfigured to an accessor
+      // or interceptor property.
       if (property_details.IsConfigurable()) {
-        // With deoptimization support, we can lower stores even to configurable
-        // data properties on the global object, by adding a code dependency on
-        // the cell.
-        if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+        // Protect lowering by recording a code dependency on the cell.
         dependencies()->AssumePropertyCell(property_cell);
       }
       effect = graph()->NewNode(
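
Both the kConstant and kConstantType store cases above compile down to a
guard that eagerly deoptimizes on mismatch. A C++ analogy of that guard
(Deoptimize is a stand-in for bailing out of the optimized code):

    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] void Deoptimize() {
      puts("deopt: cell assumption violated");
      exit(1);
    }

    void StoreToConstantCell(long new_value, long cell_constant) {
      if (new_value != cell_constant) Deoptimize();
      // Otherwise the store is dropped: the cell already holds this value.
    }

    int main() {
      StoreToConstantCell(42, 42);  // matches, no deopt
      puts("store folded away");
    }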
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
index 83d890c..3ffc67a 100644
--- a/src/compiler/js-global-object-specialization.h
+++ b/src/compiler/js-global-object-specialization.h
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
 #define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
 
-#include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 
 namespace v8 {
@@ -30,14 +29,7 @@
 // nodes.
 class JSGlobalObjectSpecialization final : public AdvancedReducer {
  public:
-  // Flags that control the mode of operation.
-  enum Flag {
-    kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
-  };
-  typedef base::Flags<Flag> Flags;
-
-  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
                                MaybeHandle<Context> native_context,
                                CompilationDependencies* dependencies);
 
@@ -61,12 +53,10 @@
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
-  Flags flags() const { return flags_; }
   MaybeHandle<Context> native_context() const { return native_context_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
-  Flags const flags_;
   MaybeHandle<Context> native_context_;
   CompilationDependencies* const dependencies_;
   TypeCache const& type_cache_;
@@ -74,8 +64,6 @@
   DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
 };
 
-DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 99a1547..2244f9b 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -205,6 +205,7 @@
       case IrOpcode::kThrow:
         NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
                                           input);
+        Revisit(jsgraph_->graph()->end());
         break;
       default:
         UNREACHABLE();
@@ -243,8 +244,7 @@
                                             Handle<SharedFunctionInfo> shared) {
   const FrameStateFunctionInfo* state_info =
       jsgraph_->common()->CreateFrameStateFunctionInfo(
-          frame_state_type, parameter_count + 1, 0, shared,
-          CALL_MAINTAINS_NATIVE_CONTEXT);
+          frame_state_type, parameter_count + 1, 0, shared);
 
   const Operator* op = jsgraph_->common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
@@ -267,10 +267,18 @@
 namespace {
 
 // TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
-bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
-  Code* construct_stub = function->shared()->construct_stub();
-  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
-         construct_stub != *isolate->builtins()->ConstructedNonConstructable();
+bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* const isolate = shared_info->GetIsolate();
+  Code* const construct_stub = shared_info->construct_stub();
+  return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+}
+
+bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
+  DisallowHeapAllocation no_gc;
+  Isolate* const isolate = shared_info->GetIsolate();
+  Code* const construct_stub = shared_info->construct_stub();
+  return construct_stub == *isolate->builtins()->ConstructedNonConstructable();
 }
 
 }  // namespace
@@ -294,20 +302,21 @@
 Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
   JSCallAccessor call(node);
+  Handle<SharedFunctionInfo> shared_info(function->shared());
 
   // Function must be inlineable.
-  if (!function->shared()->IsInlineable()) {
+  if (!shared_info->IsInlineable()) {
     TRACE("Not inlining %s into %s because callee is not inlineable\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   // Constructor must be constructable.
   if (node->opcode() == IrOpcode::kJSCallConstruct &&
-      !function->IsConstructor()) {
+      IsNonConstructible(shared_info)) {
     TRACE("Not inlining %s into %s because constructor is not constructable.\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -315,17 +324,17 @@
   // Class constructors are callable, but [[Call]] will raise an exception.
   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      IsClassConstructor(function->shared()->kind())) {
+      IsClassConstructor(shared_info->kind())) {
     TRACE("Not inlining %s into %s because callee is a class constructor.\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   // Function contains break points.
-  if (function->shared()->HasDebugInfo()) {
+  if (shared_info->HasDebugInfo()) {
     TRACE("Not inlining %s into %s because callee may contain break points\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -341,7 +350,7 @@
   if (function->context()->native_context() !=
       info_->context()->native_context()) {
     TRACE("Not inlining %s into %s because of different native contexts\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -352,12 +361,12 @@
   for (Node* frame_state = call.frame_state_after();
        frame_state->opcode() == IrOpcode::kFrameState;
        frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
-    FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
-    Handle<SharedFunctionInfo> shared_info;
-    if (info.shared_info().ToHandle(&shared_info) &&
-        *shared_info == function->shared()) {
+    FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+    Handle<SharedFunctionInfo> frame_shared_info;
+    if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
+        *frame_shared_info == *shared_info) {
       TRACE("Not inlining %s into %s because call is recursive\n",
-            function->shared()->DebugName()->ToCString().get(),
+            shared_info->DebugName()->ToCString().get(),
             info_->shared_info()->DebugName()->ToCString().get());
       return NoChange();
     }
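
A sketch of the recursion check above: walk the chain of outer frame states
and refuse to inline when any frame already belongs to the callee
(FrameState here is an illustrative stand-in for the FrameStateInfo chain):

    #include <cstdio>

    struct FrameState {
      const void* shared_info;
      const FrameState* outer;  // nullptr at the outermost frame
    };

    bool IsRecursiveCall(const FrameState* fs, const void* callee_shared) {
      for (; fs != nullptr; fs = fs->outer) {
        if (fs->shared_info == callee_shared) return true;
      }
      return false;
    }

    int main() {
      int callee;
      FrameState outer{nullptr, nullptr};
      FrameState inner{&callee, &outer};
      printf("%d\n", IsRecursiveCall(&inner, &callee));  // 1: do not inline
    }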
@@ -366,7 +375,7 @@
   // TODO(turbofan): Inlining into a try-block is not yet supported.
   if (NodeProperties::IsExceptionalCall(node)) {
     TRACE("Not inlining %s into %s because of surrounding try-block\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
@@ -374,13 +383,11 @@
   Zone zone;
   ParseInfo parse_info(&zone, function);
   CompilationInfo info(&parse_info);
-  if (info_->is_deoptimization_enabled()) {
-    info.MarkAsDeoptimizationEnabled();
-  }
+  if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
 
   if (!Compiler::ParseAndAnalyze(info.parse_info())) {
     TRACE("Not inlining %s into %s because parsing failed\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     if (info_->isolate()->has_pending_exception()) {
       info_->isolate()->clear_pending_exception();
@@ -394,28 +401,28 @@
   if (is_strong(info.language_mode()) &&
       call.formal_arguments() < parameter_count) {
     TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
 
   if (!Compiler::EnsureDeoptimizationSupport(&info)) {
     TRACE("Not inlining %s into %s because deoptimization support failed\n",
-          function->shared()->DebugName()->ToCString().get(),
+          shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
     return NoChange();
   }
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
-  info_->AddInlinedFunction(info.shared_info());
+  info_->AddInlinedFunction(shared_info);
 
   // ----------------------------------------------------------------
   // After this point, we've made a decision to inline this function.
   // We shall not bailout from inlining if we got here.
 
   TRACE("Inlining %s into %s\n",
-        function->shared()->DebugName()->ToCString().get(),
+        shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
   // TODO(mstarzinger): We could use the temporary zone for the graph because
@@ -442,7 +449,7 @@
   // Note that the context has to be the callers context (input to call node).
   Node* receiver = jsgraph_->UndefinedConstant();  // Implicit receiver.
   if (node->opcode() == IrOpcode::kJSCallConstruct &&
-      NeedsImplicitReceiver(function, info_->isolate())) {
+      NeedsImplicitReceiver(shared_info)) {
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* context = NodeProperties::GetContextInput(node);
     Node* create = jsgraph_->graph()->NewNode(
@@ -491,7 +498,7 @@
   // in that frame state tho, as the conversion of the receiver can be repeated
   // any number of times, it's not observable.
   if (node->opcode() == IrOpcode::kJSCallFunction &&
-      is_sloppy(info.language_mode()) && !function->shared()->native()) {
+      is_sloppy(info.language_mode()) && !shared_info->native()) {
     const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* convert = jsgraph_->graph()->NewNode(
@@ -509,7 +516,7 @@
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(),
-        FrameStateType::kArgumentsAdaptor, info.shared_info());
+        FrameStateType::kArgumentsAdaptor, shared_info);
   }
 
   return InlineCall(node, new_target, context, frame_state, start, end);
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index ca5cb93..abeb110 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -49,20 +49,14 @@
       return ReduceIncrementStatsCounter(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
-    case Runtime::kInlineIsDate:
-      return ReduceIsInstanceType(node, JS_DATE_TYPE);
     case Runtime::kInlineIsTypedArray:
       return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
-    case Runtime::kInlineIsFunction:
-      return ReduceIsFunction(node);
     case Runtime::kInlineIsRegExp:
       return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
     case Runtime::kInlineIsJSReceiver:
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
       return ReduceIsSmi(node);
-    case Runtime::kInlineJSValueGetValue:
-      return ReduceJSValueGetValue(node);
     case Runtime::kInlineMathClz32:
       return ReduceMathClz32(node);
     case Runtime::kInlineMathFloor:
@@ -71,8 +65,6 @@
       return ReduceMathSqrt(node);
     case Runtime::kInlineValueOf:
       return ReduceValueOf(node);
-    case Runtime::kInlineIsMinusZero:
-      return ReduceIsMinusZero(node);
     case Runtime::kInlineFixedArrayGet:
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
@@ -148,6 +140,7 @@
       graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
                        frame_state, effect, control);
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   node->TrimInputCount(0);
   NodeProperties::ChangeOp(node, common()->Dead());
@@ -229,89 +222,8 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  if (value_type->Is(Type::Function())) {
-    value = jsgraph()->TrueConstant();
-  } else {
-    // if (%_IsSmi(value)) {
-    //   return false;
-    // } else {
-    //   return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
-    // }
-    STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
-
-    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-    Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue = jsgraph()->FalseConstant();
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, effect, if_false),
-        effect, if_false);
-    Node* vfalse =
-        graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                         jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
-  }
-  ReplaceWithValue(node, node, effect, control);
-  return Replace(value);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Type* value_type = NodeProperties::GetType(value);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  if (value_type->Is(Type::Receiver())) {
-    value = jsgraph()->TrueConstant();
-  } else if (!value_type->Maybe(Type::Receiver())) {
-    value = jsgraph()->FalseConstant();
-  } else {
-    // if (%_IsSmi(value)) {
-    //   return false;
-    // } else {
-    //   return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
-    // }
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-
-    Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
-    Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue = jsgraph()->FalseConstant();
-
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, effect, if_false),
-        effect, if_false);
-    Node* vfalse = graph()->NewNode(
-        machine()->Uint32LessThanOrEqual(),
-        jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                             vtrue, vfalse, control);
-  }
-  ReplaceWithValue(node, node, effect, control);
-  return Replace(value);
+  return Change(node, simplified()->ObjectIsReceiver());
 }
 
 
@@ -320,15 +232,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
-                effect, control);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
   return Change(node, machine()->Word32Clz());
 }
@@ -420,30 +323,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceIsMinusZero(Node* node) {
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-
-  Node* double_lo =
-      graph()->NewNode(machine()->Float64ExtractLowWord32(), value);
-  Node* check1 = graph()->NewNode(machine()->Word32Equal(), double_lo,
-                                  jsgraph()->ZeroConstant());
-
-  Node* double_hi =
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value);
-  Node* check2 = graph()->NewNode(
-      machine()->Word32Equal(), double_hi,
-      jsgraph()->Int32Constant(static_cast<int32_t>(0x80000000)));
-
-  ReplaceWithValue(node, node, effect);
-
-  Node* and_result = graph()->NewNode(machine()->Word32And(), check1, check2);
-
-  return Change(node, machine()->Word32Equal(), and_result,
-                jsgraph()->Int32Constant(1));
-}
-
-
 Reduction JSIntrinsicLowering::ReduceFixedArrayGet(Node* node) {
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
@@ -507,12 +386,43 @@
 
 Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
   Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // ToInteger is a no-op on integer values and -0.
   Type* value_type = NodeProperties::GetType(value);
   if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
     ReplaceWithValue(node, value);
     return Replace(value);
   }
-  return NoChange();
+
+  Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+  Node* branch =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* etrue = effect;
+  Node* vtrue = value;
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* efalse = effect;
+  Node* vfalse;
+  {
+    vfalse = efalse =
+        graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
+                         context, frame_state, efalse, if_false);
+    if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+                           vtrue, vfalse, control);
+  // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
+  ReplaceWithValue(node, value, effect, control);
+  return Changed(value);
 }
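
The new lowering builds a classic fast/slow-path diamond: a Smi check guards
a pass-through fast path, the slow path calls the runtime, and both sides
merge into one value. A C++ analogy (IsSmallInt and ToIntegerSlow are
illustrative stand-ins for ObjectIsSmi and Runtime::kToInteger):

    #include <cmath>
    #include <cstdio>

    static bool IsSmallInt(double v) { return std::trunc(v) == v; }

    static double ToIntegerSlow(double v) { return std::trunc(v); }

    double ToInteger(double v) {
      if (IsSmallInt(v)) return v;  // if_true: already an integer, no work
      return ToIntegerSlow(v);      // if_false: runtime call, then merge
    }

    int main() { printf("%g %g\n", ToInteger(3), ToInteger(3.7)); }  // 3 3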
 
 
@@ -589,20 +499,20 @@
 
 Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kDisallow));
+  NodeProperties::ChangeOp(node,
+                           javascript()->CallFunction(arity, VectorSlotPair(),
+                                                      ConvertReceiverMode::kAny,
+                                                      TailCallMode::kDisallow));
   return Changed(node);
 }
 
 
 Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
-  NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kAllow));
+  NodeProperties::ChangeOp(node,
+                           javascript()->CallFunction(arity, VectorSlotPair(),
+                                                      ConvertReceiverMode::kAny,
+                                                      TailCallMode::kAllow));
   return Changed(node);
 }
 
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 1977a58..d8e1102 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -44,12 +44,9 @@
   Reduction ReduceDoubleHi(Node* node);
   Reduction ReduceDoubleLo(Node* node);
   Reduction ReduceIncrementStatsCounter(Node* node);
-  Reduction ReduceIsMinusZero(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
-  Reduction ReduceIsFunction(Node* node);
   Reduction ReduceIsJSReceiver(Node* node);
   Reduction ReduceIsSmi(Node* node);
-  Reduction ReduceJSValueGetValue(Node* node);
   Reduction ReduceMathClz32(Node* node);
   Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathSqrt(Node* node);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index 06cf770..2c11794 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -38,6 +38,8 @@
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kJSLoadContext:
+      return ReduceJSLoadContext(node);
     case IrOpcode::kJSLoadNamed:
       return ReduceJSLoadNamed(node);
     case IrOpcode::kJSStoreNamed:
@@ -52,6 +54,21 @@
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Handle<Context> native_context;
+  // Specialize JSLoadContext(NATIVE_CONTEXT_INDEX) to the known native
+  // context (if any), so we can constant-fold those fields, which is
+  // safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
+  if (access.index() == Context::NATIVE_CONTEXT_INDEX &&
+      GetNativeContext(node).ToHandle(&native_context)) {
+    Node* value = jsgraph()->HeapConstant(native_context);
+    ReplaceWithValue(node, value);
+    return Replace(value);
+  }
+  return NoChange();
+}
 
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     Node* node, Node* value, MapHandleList const& receiver_maps,
@@ -418,6 +435,7 @@
                        frame_state, exit_effect, exit_control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -443,21 +461,49 @@
 }
 
 
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+    Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
+    AccessMode access_mode, LanguageMode language_mode) {
+  DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+         node->opcode() == IrOpcode::kJSStoreNamed);
+
+  // Check if the {nexus} reports type feedback for the IC.
+  if (nexus.IsUninitialized()) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      // TODO(turbofan): Implement all eager bailout points correctly in
+      // the graph builder.
+      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+        return ReduceSoftDeoptimize(node);
+      }
+    }
+    return NoChange();
+  }
+
+  // Extract receiver maps from the IC using the {nexus}.
+  MapHandleList receiver_maps;
+  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+  DCHECK_LT(0, receiver_maps.length());
+
+  // Try to lower the named access based on the {receiver_maps}.
+  return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+                           language_mode);
+}
+
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
   Node* const value = jsgraph()->Dead();
 
   // Extract receiver maps from the LOAD_IC using the LoadICNexus.
-  MapHandleList receiver_maps;
   if (!p.feedback().IsValid()) return NoChange();
   LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, receiver_maps, p.name(),
-                           AccessMode::kLoad, p.language_mode());
+  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kLoad,
+                           p.language_mode());
 }
 
 
@@ -467,15 +513,12 @@
   Node* const value = NodeProperties::GetValueInput(node, 1);
 
   // Extract receiver maps from the STORE_IC using the StoreICNexus.
-  MapHandleList receiver_maps;
   if (!p.feedback().IsValid()) return NoChange();
   StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
-  DCHECK_LT(0, receiver_maps.length());
 
   // Try to lower the named access based on the {receiver_maps}.
-  return ReduceNamedAccess(node, value, receiver_maps, p.name(),
-                           AccessMode::kStore, p.language_mode());
+  return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kStore,
+                           p.language_mode());
 }
 
 
@@ -705,7 +748,7 @@
     Type* element_type = Type::Any();
     MachineType element_machine_type = MachineType::AnyTagged();
     if (IsFastDoubleElementsKind(elements_kind)) {
-      element_type = type_cache_.kFloat64;
+      element_type = Type::Number();
       element_machine_type = MachineType::Float64();
     } else if (IsFastSmiElementsKind(elements_kind)) {
       element_type = type_cache_.kSmi;
@@ -850,6 +893,7 @@
                        frame_state, exit_effect, exit_control);
   // TODO(bmeurer): This should be on the AdvancedReducer somehow.
   NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
 
   // Generate the final merge point for all (polymorphic) branches.
   int const control_count = static_cast<int>(controls.size());
@@ -882,6 +926,20 @@
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty);
 
+  // Check if the {nexus} reports type feedback for the IC.
+  if (nexus.IsUninitialized()) {
+    if ((flags() & kDeoptimizationEnabled) &&
+        (flags() & kBailoutOnUninitialized)) {
+      // TODO(turbofan): Implement all eager bailout points correctly in
+      // the graph builder.
+      Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+      if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+        return ReduceSoftDeoptimize(node);
+      }
+    }
+    return NoChange();
+  }
+
   // Extract receiver maps from the {nexus}.
   MapHandleList receiver_maps;
   if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
@@ -921,6 +979,22 @@
 }
 
 
+Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
+  Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* deoptimize =
+      graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
+                       effect, control);
+  // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+  NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+  Revisit(graph()->end());
+  node->TrimInputCount(0);
+  NodeProperties::ChangeOp(node, common()->Dead());
+  return Changed(node);
+}
+
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
   PropertyAccess const& p = PropertyAccessOf(node->op());
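
Note on the two uninitialized-feedback checks added above: both named and keyed access now soft-deoptimize instead of generating generic code when the IC has collected no feedback, provided the right flags are set and the frame state carries a usable bailout id. A hedged sketch of that gate (plain C++; ShouldSoftDeoptOnUninitialized and has_usable_bailout_id are hypothetical stand-ins for the flag test and the FrameStateInfo bailout-id check; the flag values mirror the header change below):

// Flag bits as declared in js-native-context-specialization.h below:
// kBailoutOnUninitialized is bit 0, kDeoptimizationEnabled is bit 1.
enum Flag : unsigned {
  kNoFlags = 0u,
  kBailoutOnUninitialized = 1u << 0,
  kDeoptimizationEnabled = 1u << 1,
};

// Soft-deopt only when both flags are set and the surrounding frame state
// carries a usable bailout id; otherwise fall through to NoChange().
bool ShouldSoftDeoptOnUninitialized(unsigned flags,
                                    bool has_usable_bailout_id) {
  return (flags & kDeoptimizationEnabled) != 0 &&
         (flags & kBailoutOnUninitialized) != 0 &&
         has_usable_bailout_id;
}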
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 45ff87f..4251d72 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -38,7 +38,8 @@
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kDeoptimizationEnabled = 1u << 0,
+    kBailoutOnUninitialized = 1u << 0,
+    kDeoptimizationEnabled = 1u << 1,
   };
   typedef base::Flags<Flag> Flags;
 
@@ -50,6 +51,7 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
@@ -66,11 +68,17 @@
                               LanguageMode language_mode,
                               KeyedAccessStoreMode store_mode);
   Reduction ReduceNamedAccess(Node* node, Node* value,
+                              FeedbackNexus const& nexus, Handle<Name> name,
+                              AccessMode access_mode,
+                              LanguageMode language_mode);
+  Reduction ReduceNamedAccess(Node* node, Node* value,
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
                               Node* index = nullptr);
 
+  Reduction ReduceSoftDeoptimize(Node* node);
+
   // Adds stability dependencies on all prototypes of every class in
   // {receiver_type} up to (and including) the {holder}.
   void AssumePrototypesStable(Type* receiver_type,
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 1455f0a..5fcd519 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -52,63 +52,6 @@
 }
 
 
-size_t hash_value(TailCallMode mode) {
-  return base::hash_value(static_cast<unsigned>(mode));
-}
-
-
-std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
-  switch (mode) {
-    case TailCallMode::kAllow:
-      return os << "ALLOW_TAIL_CALLS";
-    case TailCallMode::kDisallow:
-      return os << "DISALLOW_TAIL_CALLS";
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
-bool operator==(BinaryOperationParameters const& lhs,
-                BinaryOperationParameters const& rhs) {
-  return lhs.language_mode() == rhs.language_mode() &&
-         lhs.hints() == rhs.hints();
-}
-
-
-bool operator!=(BinaryOperationParameters const& lhs,
-                BinaryOperationParameters const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(BinaryOperationParameters const& p) {
-  return base::hash_combine(p.language_mode(), p.hints());
-}
-
-
-std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
-  return os << p.language_mode() << ", " << p.hints();
-}
-
-
-BinaryOperationParameters const& BinaryOperationParametersOf(
-    Operator const* op) {
-  DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
-         op->opcode() == IrOpcode::kJSBitwiseXor ||
-         op->opcode() == IrOpcode::kJSBitwiseAnd ||
-         op->opcode() == IrOpcode::kJSShiftLeft ||
-         op->opcode() == IrOpcode::kJSShiftRight ||
-         op->opcode() == IrOpcode::kJSShiftRightLogical ||
-         op->opcode() == IrOpcode::kJSAdd ||
-         op->opcode() == IrOpcode::kJSSubtract ||
-         op->opcode() == IrOpcode::kJSMultiply ||
-         op->opcode() == IrOpcode::kJSDivide ||
-         op->opcode() == IrOpcode::kJSModulus);
-  return OpParameter<BinaryOperationParameters>(op);
-}
-
-
 bool operator==(CallConstructParameters const& lhs,
                 CallConstructParameters const& rhs) {
   return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
@@ -138,8 +81,7 @@
 
 
 std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
-  os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
-     << ", " << p.tail_call_mode();
+  os << p.arity() << ", " << p.convert_mode() << ", " << p.tail_call_mode();
   return os;
 }
 
@@ -216,38 +158,6 @@
 }
 
 
-DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
-    : name_(name), typeof_mode_(typeof_mode) {}
-
-
-bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
-  UNIMPLEMENTED();
-  return true;
-}
-
-
-bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(DynamicAccess const& access) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
-std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
-  return os << Brief(*access.name()) << ", " << access.typeof_mode();
-}
-
-
-DynamicAccess const& DynamicAccessOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
-  return OpParameter<DynamicAccess>(op);
-}
-
-
 bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
   return lhs.name().location() == rhs.name().location() &&
          lhs.language_mode() == rhs.language_mode() &&
@@ -367,32 +277,9 @@
 }
 
 
-bool operator==(CreateArgumentsParameters const& lhs,
-                CreateArgumentsParameters const& rhs) {
-  return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
-}
-
-
-bool operator!=(CreateArgumentsParameters const& lhs,
-                CreateArgumentsParameters const& rhs) {
-  return !(lhs == rhs);
-}
-
-
-size_t hash_value(CreateArgumentsParameters const& p) {
-  return base::hash_combine(p.type(), p.start_index());
-}
-
-
-std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
-  return os << p.type() << ", " << p.start_index();
-}
-
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
-    const Operator* op) {
+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
-  return OpParameter<CreateArgumentsParameters>(op);
+  return OpParameter<CreateArgumentsType>(op);
 }
 
 
@@ -486,12 +373,15 @@
   return OpParameter<CreateLiteralParameters>(op);
 }
 
-
 #define CACHED_OP_LIST(V)                                  \
   V(Equal, Operator::kNoProperties, 2, 1)                  \
   V(NotEqual, Operator::kNoProperties, 2, 1)               \
   V(StrictEqual, Operator::kNoThrow, 2, 1)                 \
   V(StrictNotEqual, Operator::kNoThrow, 2, 1)              \
+  V(LessThan, Operator::kNoProperties, 2, 1)               \
+  V(GreaterThan, Operator::kNoProperties, 2, 1)            \
+  V(LessThanOrEqual, Operator::kNoProperties, 2, 1)        \
+  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)     \
   V(ToNumber, Operator::kNoProperties, 1, 1)               \
   V(ToString, Operator::kNoProperties, 1, 1)               \
   V(ToName, Operator::kNoProperties, 1, 1)                 \
@@ -512,14 +402,6 @@
   V(CreateWithContext, Operator::kNoProperties, 2, 1)      \
   V(CreateModuleContext, Operator::kNoProperties, 2, 1)
 
-
-#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V)        \
-  V(LessThan, Operator::kNoProperties, 2, 1)        \
-  V(GreaterThan, Operator::kNoProperties, 2, 1)     \
-  V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
-  V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
-
-
 struct JSOperatorGlobalCache final {
 #define CACHED(Name, properties, value_input_count, value_output_count)  \
   struct Name##Operator final : public Operator {                        \
@@ -533,25 +415,6 @@
   Name##Operator k##Name##Operator;
   CACHED_OP_LIST(CACHED)
 #undef CACHED
-
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count,        \
-                                  value_output_count)                         \
-  template <LanguageMode kLanguageMode>                                       \
-  struct Name##Operator final : public Operator1<LanguageMode> {              \
-    Name##Operator()                                                          \
-        : Operator1<LanguageMode>(                                            \
-              IrOpcode::kJS##Name, properties, "JS" #Name, value_input_count, \
-              Operator::ZeroIfPure(properties),                               \
-              Operator::ZeroIfEliminatable(properties), value_output_count,   \
-              Operator::ZeroIfPure(properties),                               \
-              Operator::ZeroIfNoThrow(properties), kLanguageMode) {}          \
-  };                                                                          \
-  Name##Operator<SLOPPY> k##Name##SloppyOperator;                             \
-  Name##Operator<STRICT> k##Name##StrictOperator;                             \
-  Name##Operator<STRONG> k##Name##StrongOperator;
-  CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
 };
 
 
@@ -570,156 +433,104 @@
 CACHED_OP_LIST(CACHED)
 #undef CACHED
 
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count,  \
-                                  value_output_count)                   \
-  const Operator* JSOperatorBuilder::Name(LanguageMode language_mode) { \
-    switch (language_mode) {                                            \
-      case SLOPPY:                                                      \
-        return &cache_.k##Name##SloppyOperator;                         \
-      case STRICT:                                                      \
-        return &cache_.k##Name##StrictOperator;                         \
-      case STRONG:                                                      \
-        return &cache_.k##Name##StrongOperator;                         \
-      default:                                                          \
-        break; /* %*!%^$#@ */                                           \
-    }                                                                   \
-    UNREACHABLE();                                                      \
-    return nullptr;                                                     \
-  }
-CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
-
-
-const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
-                                             BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseOr(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseOr, Operator::kNoProperties,       // opcode
-      "JSBitwiseOr",                                         // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSBitwiseOr, Operator::kNoProperties,  // opcode
+      "JSBitwiseOr",                                    // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseXor(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseXor, Operator::kNoProperties,      // opcode
-      "JSBitwiseXor",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSBitwiseXor, Operator::kNoProperties,  // opcode
+      "JSBitwiseXor",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseAnd(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSBitwiseAnd, Operator::kNoProperties,      // opcode
-      "JSBitwiseAnd",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSBitwiseAnd, Operator::kNoProperties,  // opcode
+      "JSBitwiseAnd",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
-                                             BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftLeft(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSShiftLeft, Operator::kNoProperties,       // opcode
-      "JSShiftLeft",                                         // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSShiftLeft, Operator::kNoProperties,  // opcode
+      "JSShiftLeft",                                    // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
-                                              BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftRight(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSShiftRight, Operator::kNoProperties,      // opcode
-      "JSShiftRight",                                        // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(   //--
+      IrOpcode::kJSShiftRight, Operator::kNoProperties,  // opcode
+      "JSShiftRight",                                    // name
+      2, 1, 1, 1, 1, 2,                                  // inputs/outputs
+      hints);                                            // parameter
 }
 
-
 const Operator* JSOperatorBuilder::ShiftRightLogical(
-    LanguageMode language_mode, BinaryOperationHints hints) {
+    BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(     //--
+  return new (zone()) Operator1<BinaryOperationHints>(          //--
       IrOpcode::kJSShiftRightLogical, Operator::kNoProperties,  // opcode
       "JSShiftRightLogical",                                    // name
       2, 1, 1, 1, 1, 2,  // inputs/outputs
-      parameters);       // parameter
+      hints);            // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
-                                       BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Add(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSAdd, Operator::kNoProperties,             // opcode
-      "JSAdd",                                               // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSAdd, Operator::kNoProperties,        // opcode
+      "JSAdd",                                          // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
-                                            BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Subtract(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSSubtract, Operator::kNoProperties,        // opcode
-      "JSSubtract",                                          // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSSubtract, Operator::kNoProperties,   // opcode
+      "JSSubtract",                                     // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
-                                            BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Multiply(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSMultiply, Operator::kNoProperties,        // opcode
-      "JSMultiply",                                          // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSMultiply, Operator::kNoProperties,   // opcode
+      "JSMultiply",                                     // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
-                                          BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Divide(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSDivide, Operator::kNoProperties,          // opcode
-      "JSDivide",                                            // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSDivide, Operator::kNoProperties,     // opcode
+      "JSDivide",                                       // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
-
-const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
-                                           BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Modulus(BinaryOperationHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
-  BinaryOperationParameters parameters(language_mode, hints);
-  return new (zone()) Operator1<BinaryOperationParameters>(  //--
-      IrOpcode::kJSModulus, Operator::kNoProperties,         // opcode
-      "JSModulus",                                           // name
-      2, 1, 1, 1, 1, 2,                                      // inputs/outputs
-      parameters);                                           // parameter
+  return new (zone()) Operator1<BinaryOperationHints>(  //--
+      IrOpcode::kJSModulus, Operator::kNoProperties,    // opcode
+      "JSModulus",                                      // name
+      2, 1, 1, 1, 1, 2,                                 // inputs/outputs
+      hints);                                           // parameter
 }
 
 
@@ -732,12 +543,11 @@
       hints);                                           // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CallFunction(
-    size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+    size_t arity, VectorSlotPair const& feedback,
     ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
-  CallFunctionParameters parameters(arity, language_mode, feedback,
-                                    tail_call_mode, convert_mode);
+  CallFunctionParameters parameters(arity, feedback, tail_call_mode,
+                                    convert_mode);
   return new (zone()) Operator1<CallFunctionParameters>(   // --
       IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
       "JSCallFunction",                                    // name
@@ -746,10 +556,22 @@
 }
 
 
+const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
+  const Runtime::Function* f = Runtime::FunctionForId(id);
+  return CallRuntime(f, f->nargs);
+}
+
+
 const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
                                                size_t arity) {
-  CallRuntimeParameters parameters(id, arity);
-  const Runtime::Function* f = Runtime::FunctionForId(parameters.id());
+  const Runtime::Function* f = Runtime::FunctionForId(id);
+  return CallRuntime(f, arity);
+}
+
+
+const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
+                                               size_t arity) {
+  CallRuntimeParameters parameters(f->function_id, arity);
   DCHECK(f->nargs == -1 || f->nargs == static_cast<int>(parameters.arity()));
   return new (zone()) Operator1<CallRuntimeParameters>(   // --
       IrOpcode::kJSCallRuntime, Operator::kNoProperties,  // opcode
@@ -779,11 +601,9 @@
       convert_mode);                                     // parameter
 }
 
-
-const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
-                                             Handle<Name> name,
+const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
                                              const VectorSlotPair& feedback) {
-  NamedAccess access(language_mode, name, feedback);
+  NamedAccess access(SLOPPY, name, feedback);
   return new (zone()) Operator1<NamedAccess>(           // --
       IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
       "JSLoadNamed",                                    // name
@@ -791,10 +611,9 @@
       access);                                          // parameter
 }
 
-
 const Operator* JSOperatorBuilder::LoadProperty(
-    LanguageMode language_mode, VectorSlotPair const& feedback) {
-  PropertyAccess access(language_mode, feedback);
+    VectorSlotPair const& feedback) {
+  PropertyAccess access(SLOPPY, feedback);
   return new (zone()) Operator1<PropertyAccess>(           // --
       IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
       "JSLoadProperty",                                    // name
@@ -882,26 +701,12 @@
 }
 
 
-const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
-                                               TypeofMode typeof_mode) {
-  DynamicAccess access(name, typeof_mode);
-  return new (zone()) Operator1<DynamicAccess>(           // --
-      IrOpcode::kJSLoadDynamic, Operator::kNoProperties,  // opcode
-      "JSLoadDynamic",                                    // name
-      2, 1, 1, 1, 1, 2,                                   // counts
-      access);                                            // parameter
-}
-
-
-const Operator* JSOperatorBuilder::CreateArguments(
-    CreateArgumentsParameters::Type type, int start_index) {
-  DCHECK_IMPLIES(start_index, type == CreateArgumentsParameters::kRestArray);
-  CreateArgumentsParameters parameters(type, start_index);
-  return new (zone()) Operator1<CreateArgumentsParameters>(  // --
-      IrOpcode::kJSCreateArguments, Operator::kNoThrow,      // opcode
-      "JSCreateArguments",                                   // name
-      1, 1, 1, 1, 1, 0,                                      // counts
-      parameters);                                           // parameter
+const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
+  return new (zone()) Operator1<CreateArgumentsType>(    // --
+      IrOpcode::kJSCreateArguments, Operator::kNoThrow,  // opcode
+      "JSCreateArguments",                               // name
+      1, 1, 1, 1, 1, 0,                                  // counts
+      type);                                             // parameter
 }
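
Note on the CallRuntime overloads above: the new single-argument form derives the arity from the runtime function descriptor (f->nargs), and all three overloads now funnel through the one taking a Runtime::Function*. A minimal sketch of that delegation pattern (plain C++; RuntimeFunction, LookupRuntimeFunction, and MakeCallRuntime are hypothetical stand-ins for Runtime::Function, Runtime::FunctionForId, and the operator construction):

#include <cassert>

struct RuntimeFunction {
  int nargs;  // declared arity; -1 means variadic
};

// Toy descriptor lookup standing in for Runtime::FunctionForId.
const RuntimeFunction* LookupRuntimeFunction(int id) {
  static const RuntimeFunction kTable[] = {{1}, {2}, {-1}};
  return &kTable[id % 3];
}

// Base overload: validate the declared arity, then build the operator.
int MakeCallRuntime(const RuntimeFunction* f, int arity) {
  assert(f->nargs == -1 || f->nargs == arity);  // mirrors the DCHECK above
  return arity;  // stand-in for building Operator1<CallRuntimeParameters>
}

// Convenience overloads forward to the base one.
int MakeCallRuntime(int id, int arity) {
  return MakeCallRuntime(LookupRuntimeFunction(id), arity);
}
int MakeCallRuntime(int id) {
  const RuntimeFunction* f = LookupRuntimeFunction(id);
  return MakeCallRuntime(f, f->nargs);  // arity taken from the descriptor
}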
 
 
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index ca7c7ea..070e71e 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -51,42 +51,6 @@
 ToBooleanHints ToBooleanHintsOf(Operator const* op);
 
 
-// Defines whether tail call optimization is allowed.
-enum class TailCallMode : unsigned { kAllow, kDisallow };
-
-size_t hash_value(TailCallMode);
-
-std::ostream& operator<<(std::ostream&, TailCallMode);
-
-
-// Defines the language mode and hints for a JavaScript binary operations.
-// This is used as parameter by JSAdd, JSSubtract, etc. operators.
-class BinaryOperationParameters final {
- public:
-  BinaryOperationParameters(LanguageMode language_mode,
-                            BinaryOperationHints hints)
-      : language_mode_(language_mode), hints_(hints) {}
-
-  LanguageMode language_mode() const { return language_mode_; }
-  BinaryOperationHints hints() const { return hints_; }
-
- private:
-  LanguageMode const language_mode_;
-  BinaryOperationHints const hints_;
-};
-
-bool operator==(BinaryOperationParameters const&,
-                BinaryOperationParameters const&);
-bool operator!=(BinaryOperationParameters const&,
-                BinaryOperationParameters const&);
-
-size_t hash_value(BinaryOperationParameters const&);
-
-std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
-
-BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
-
-
 // Defines the arity and the feedback for a JavaScript constructor call. This is
 // used as a parameter by JSCallConstruct operators.
 class CallConstructParameters final {
@@ -116,20 +80,15 @@
 // used as a parameter by JSCallFunction operators.
 class CallFunctionParameters final {
  public:
-  CallFunctionParameters(size_t arity, LanguageMode language_mode,
-                         VectorSlotPair const& feedback,
+  CallFunctionParameters(size_t arity, VectorSlotPair const& feedback,
                          TailCallMode tail_call_mode,
                          ConvertReceiverMode convert_mode)
       : bit_field_(ArityField::encode(arity) |
                    ConvertReceiverModeField::encode(convert_mode) |
-                   LanguageModeField::encode(language_mode) |
                    TailCallModeField::encode(tail_call_mode)),
         feedback_(feedback) {}
 
   size_t arity() const { return ArityField::decode(bit_field_); }
-  LanguageMode language_mode() const {
-    return LanguageModeField::decode(bit_field_);
-  }
   ConvertReceiverMode convert_mode() const {
     return ConvertReceiverModeField::decode(bit_field_);
   }
@@ -151,9 +110,8 @@
     return base::hash_combine(p.bit_field_, p.feedback_);
   }
 
-  typedef BitField<size_t, 0, 27> ArityField;
-  typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
-  typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+  typedef BitField<size_t, 0, 29> ArityField;
+  typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
   typedef BitField<TailCallMode, 31, 1> TailCallModeField;
 
   const uint32_t bit_field_;
@@ -221,30 +179,6 @@
 ContextAccess const& ContextAccessOf(Operator const*);
 
 
-// Defines the name for a dynamic variable lookup. This is used as a parameter
-// by JSLoadDynamic and JSStoreDynamic operators.
-class DynamicAccess final {
- public:
-  DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
-
-  const Handle<String>& name() const { return name_; }
-  TypeofMode typeof_mode() const { return typeof_mode_; }
-
- private:
-  const Handle<String> name_;
-  const TypeofMode typeof_mode_;
-};
-
-size_t hash_value(DynamicAccess const&);
-
-bool operator==(DynamicAccess const&, DynamicAccess const&);
-bool operator!=(DynamicAccess const&, DynamicAccess const&);
-
-std::ostream& operator<<(std::ostream&, DynamicAccess const&);
-
-DynamicAccess const& DynamicAccessOf(Operator const*);
-
-
 // Defines the property of an object for a named access. This is
 // used as a parameter by the JSLoadNamed and JSStoreNamed operators.
 class NamedAccess final {
@@ -356,33 +290,8 @@
 PropertyAccess const& PropertyAccessOf(const Operator* op);
 
 
-// Defines specifics about arguments object or rest parameter creation. This is
-// used as a parameter by JSCreateArguments operators.
-class CreateArgumentsParameters final {
- public:
-  enum Type { kMappedArguments, kUnmappedArguments, kRestArray };
-  CreateArgumentsParameters(Type type, int start_index)
-      : type_(type), start_index_(start_index) {}
-
-  Type type() const { return type_; }
-  int start_index() const { return start_index_; }
-
- private:
-  const Type type_;
-  const int start_index_;
-};
-
-bool operator==(CreateArgumentsParameters const&,
-                CreateArgumentsParameters const&);
-bool operator!=(CreateArgumentsParameters const&,
-                CreateArgumentsParameters const&);
-
-size_t hash_value(CreateArgumentsParameters const&);
-
-std::ostream& operator<<(std::ostream&, CreateArgumentsParameters const&);
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
-    const Operator* op);
+// CreateArgumentsType is used as parameter to JSCreateArguments nodes.
+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
 
 
 // Defines shared information for the array that should be created. This is
@@ -475,31 +384,21 @@
   const Operator* NotEqual();
   const Operator* StrictEqual();
   const Operator* StrictNotEqual();
-  const Operator* LessThan(LanguageMode language_mode);
-  const Operator* GreaterThan(LanguageMode language_mode);
-  const Operator* LessThanOrEqual(LanguageMode language_mode);
-  const Operator* GreaterThanOrEqual(LanguageMode language_mode);
-  const Operator* BitwiseOr(LanguageMode language_mode,
-                            BinaryOperationHints hints);
-  const Operator* BitwiseXor(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* BitwiseAnd(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* ShiftLeft(LanguageMode language_mode,
-                            BinaryOperationHints hints);
-  const Operator* ShiftRight(LanguageMode language_mode,
-                             BinaryOperationHints hints);
-  const Operator* ShiftRightLogical(LanguageMode language_mode,
-                                    BinaryOperationHints hints);
-  const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
-  const Operator* Subtract(LanguageMode language_mode,
-                           BinaryOperationHints hints);
-  const Operator* Multiply(LanguageMode language_mode,
-                           BinaryOperationHints hints);
-  const Operator* Divide(LanguageMode language_mode,
-                         BinaryOperationHints hints);
-  const Operator* Modulus(LanguageMode language_mode,
-                          BinaryOperationHints hints);
+  const Operator* LessThan();
+  const Operator* GreaterThan();
+  const Operator* LessThanOrEqual();
+  const Operator* GreaterThanOrEqual();
+  const Operator* BitwiseOr(BinaryOperationHints hints);
+  const Operator* BitwiseXor(BinaryOperationHints hints);
+  const Operator* BitwiseAnd(BinaryOperationHints hints);
+  const Operator* ShiftLeft(BinaryOperationHints hints);
+  const Operator* ShiftRight(BinaryOperationHints hints);
+  const Operator* ShiftRightLogical(BinaryOperationHints hints);
+  const Operator* Add(BinaryOperationHints hints);
+  const Operator* Subtract(BinaryOperationHints hints);
+  const Operator* Multiply(BinaryOperationHints hints);
+  const Operator* Divide(BinaryOperationHints hints);
+  const Operator* Modulus(BinaryOperationHints hints);
 
   const Operator* ToBoolean(ToBooleanHints hints);
   const Operator* ToNumber();
@@ -509,8 +408,7 @@
   const Operator* Yield();
 
   const Operator* Create();
-  const Operator* CreateArguments(CreateArgumentsParameters::Type type,
-                                  int start_index);
+  const Operator* CreateArguments(CreateArgumentsType type);
   const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
   const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
                                 PretenureFlag pretenure);
@@ -523,19 +421,18 @@
                                       int literal_flags, int literal_index);
 
   const Operator* CallFunction(
-      size_t arity, LanguageMode language_mode,
-      VectorSlotPair const& feedback = VectorSlotPair(),
+      size_t arity, VectorSlotPair const& feedback = VectorSlotPair(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
+  const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
   const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
 
   const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
 
-  const Operator* LoadProperty(LanguageMode language_mode,
-                               VectorSlotPair const& feedback);
-  const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
-                            VectorSlotPair const& feedback);
+  const Operator* LoadProperty(VectorSlotPair const& feedback);
+  const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback);
 
   const Operator* StoreProperty(LanguageMode language_mode,
                                 VectorSlotPair const& feedback);
@@ -556,9 +453,6 @@
   const Operator* LoadContext(size_t depth, size_t index, bool immutable);
   const Operator* StoreContext(size_t depth, size_t index);
 
-  const Operator* LoadDynamic(const Handle<String>& name,
-                              TypeofMode typeof_mode);
-
   const Operator* TypeOf();
   const Operator* InstanceOf();
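
Note on the CallFunctionParameters repacking above: removing the 2-bit LanguageModeField lets ArityField grow from 27 to 29 bits, while ConvertReceiverModeField and TailCallModeField keep the top three bits of the same 32-bit word. A quick compile-time check of the new layout (plain C++, not V8's BitField template):

#include <cstdint>

// arity: bits 0..28, convert_mode: bits 29..30, tail_call_mode: bit 31.
constexpr uint32_t Encode(uint32_t arity, uint32_t convert_mode,
                          uint32_t tail_call_mode) {
  return (arity & ((1u << 29) - 1)) | ((convert_mode & 0x3u) << 29) |
         ((tail_call_mode & 0x1u) << 31);
}

// The three fields tile the 32-bit word without overlapping.
static_assert(Encode(0x1FFFFFFFu, 0, 0) == 0x1FFFFFFFu, "arity bits");
static_assert(Encode(0, 3, 0) == (3u << 29), "convert mode bits");
static_assert(Encode(0, 0, 1) == (1u << 31), "tail call bit");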
 
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 5e0712a..11ae3a9 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -11,7 +11,6 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/state-values-utils.h"
 #include "src/type-cache.h"
 #include "src/types.h"
 
@@ -19,86 +18,6 @@
 namespace internal {
 namespace compiler {
 
-namespace {
-
-// A helper class to construct inline allocations on the simplified operator
-// level. This keeps track of the effect chain for initial stores on a newly
-// allocated object and also provides helpers for commonly allocated objects.
-class AllocationBuilder final {
- public:
-  AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
-      : jsgraph_(jsgraph),
-        allocation_(nullptr),
-        effect_(effect),
-        control_(control) {}
-
-  // Primitive allocation of static size.
-  void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
-    effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
-    allocation_ =
-        graph()->NewNode(simplified()->Allocate(pretenure),
-                         jsgraph()->Constant(size), effect_, control_);
-    effect_ = allocation_;
-  }
-
-  // Primitive store into a field.
-  void Store(const FieldAccess& access, Node* value) {
-    effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
-                               value, effect_, control_);
-  }
-
-  // Primitive store into an element.
-  void Store(ElementAccess const& access, Node* index, Node* value) {
-    effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
-                               index, value, effect_, control_);
-  }
-
-  // Compound allocation of a FixedArray.
-  void AllocateArray(int length, Handle<Map> map,
-                     PretenureFlag pretenure = NOT_TENURED) {
-    DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
-           map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
-    int size = (map->instance_type() == FIXED_ARRAY_TYPE)
-                   ? FixedArray::SizeFor(length)
-                   : FixedDoubleArray::SizeFor(length);
-    Allocate(size, pretenure);
-    Store(AccessBuilder::ForMap(), map);
-    Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
-  }
-
-  // Compound store of a constant into a field.
-  void Store(const FieldAccess& access, Handle<Object> value) {
-    Store(access, jsgraph()->Constant(value));
-  }
-
-  void FinishAndChange(Node* node) {
-    NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
-    node->ReplaceInput(0, allocation_);
-    node->ReplaceInput(1, effect_);
-    node->TrimInputCount(2);
-    NodeProperties::ChangeOp(node, common()->FinishRegion());
-  }
-
-  Node* Finish() {
-    return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
-  }
-
- protected:
-  JSGraph* jsgraph() { return jsgraph_; }
-  Graph* graph() { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() { return jsgraph_->common(); }
-  SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- private:
-  JSGraph* const jsgraph_;
-  Node* allocation_;
-  Node* effect_;
-  Node* control_;
-};
-
-}  // namespace
-
-
 // A helper class to simplify the process of reducing a single binop node with a
 // JSOperator. This class manages the rewriting of context, control, and effect
 // dependencies during lowering of a binop and contains numerous helper
@@ -218,17 +137,6 @@
     return ChangeToPureOperator(op, false, type);
   }
 
-  // TODO(turbofan): Strong mode should be killed soonish!
-  bool IsStrong() const {
-    if (node_->opcode() == IrOpcode::kJSLessThan ||
-        node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
-        node_->opcode() == IrOpcode::kJSGreaterThan ||
-        node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
-      return is_strong(OpParameter<LanguageMode>(node_));
-    }
-    return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
-  }
-
   bool LeftInputIs(Type* t) { return left_type()->Is(t); }
 
   bool RightInputIs(Type* t) { return right_type()->Is(t); }
@@ -457,7 +365,7 @@
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
     return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  if (r.NeitherInputCanBe(Type::StringOrReceiver()) && !r.IsStrong()) {
+  if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
     r.ConvertInputsToNumber(frame_state);
@@ -499,7 +407,7 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
+  if (numberOp == simplified()->NumberModulus()) {
     if (r.BothInputsAre(Type::Number())) {
       return r.ChangeToPureOperator(numberOp, Type::Number());
     }
@@ -515,13 +423,6 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong()) {
-    if (r.BothInputsAre(Type::Number())) {
-      r.ConvertInputsToUI32(kSigned, kSigned);
-      return r.ChangeToPureOperator(intOp, Type::Integral32());
-    }
-    return NoChange();
-  }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(kSigned, kSigned);
@@ -535,13 +436,6 @@
   if (flags() & kDisableBinaryOpReduction) return NoChange();
 
   JSBinopReduction r(this, node);
-  if (r.IsStrong()) {
-    if (r.BothInputsAre(Type::Number())) {
-      r.ConvertInputsToUI32(left_signedness, kUnsigned);
-      return r.ChangeToPureOperator(shift_op);
-    }
-    return NoChange();
-  }
   Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
   r.ConvertInputsToNumber(frame_state);
   r.ConvertInputsToUI32(left_signedness, kUnsigned);
@@ -588,9 +482,6 @@
       less_than_or_equal = machine()->Int32LessThanOrEqual();
     } else {
       // TODO(turbofan): mixed signed/unsigned int32 comparisons.
-      if (r.IsStrong() && !r.BothInputsAre(Type::Number())) {
-        return NoChange();
-      }
       Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
       r.ConvertInputsToNumber(frame_state);
       less_than = simplified()->NumberLessThan();
@@ -780,8 +671,18 @@
       }
     }
   }
-  // Check if we have a cached conversion.
+  // Try constant-folding of JSToNumber with constant inputs.
   Type* input_type = NodeProperties::GetType(input);
+  if (input_type->IsConstant()) {
+    Handle<Object> input_value = input_type->AsConstant()->Value();
+    if (input_value->IsString()) {
+      return Replace(jsgraph()->Constant(
+          String::ToNumber(Handle<String>::cast(input_value))));
+    } else if (input_value->IsOddball()) {
+      return Replace(jsgraph()->Constant(
+          Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
+    }
+  }
   if (input_type->Is(Type::Number())) {
     // JSToNumber(x:number) => x
     return Changed(input);
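
Note on the constant folding added above: JSToNumber of a constant string or oddball input is now evaluated at compile time via String::ToNumber and Oddball::ToNumber. A rough stand-alone sketch of those two conversions (assuming plain C strings rather than V8 handles; whitespace trimming and the full JavaScript numeric grammar are glossed over):

#include <cmath>
#include <cstdlib>
#include <cstring>

// String-to-number, roughly: "" -> +0, otherwise parse; junk -> NaN.
double StringToNumber(const char* s) {
  if (*s == '\0') return 0.0;
  char* end = nullptr;
  double result = std::strtod(s, &end);
  return (end != s && *end == '\0') ? result : NAN;
}

// Oddball-to-number: true -> 1, false and null -> 0, undefined -> NaN.
double OddballToNumber(const char* oddball) {
  if (std::strcmp(oddball, "true") == 0) return 1.0;
  if (std::strcmp(oddball, "false") == 0) return 0.0;
  if (std::strcmp(oddball, "null") == 0) return 0.0;
  return NAN;  // undefined
}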
@@ -1221,7 +1122,7 @@
   // If we need an access check or the object is a Proxy, make a runtime call
   // to finish the lowering.
   Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
-      javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+      javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
       prototype, context, frame_state, effect, control);
 
   control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
@@ -1422,663 +1323,6 @@
 }
 
 
-namespace {
-
-// Maximum instance size for which allocations will be inlined.
-const int kMaxInlineInstanceSize = 64 * kPointerSize;
-
-
-// Checks whether allocation using the given constructor can be inlined.
-bool IsAllocationInlineable(Handle<JSFunction> constructor) {
-  // TODO(bmeurer): Further relax restrictions on inlining, i.e.
-  // instance type and maybe instance size (inobject properties
-  // are limited anyways by the runtime).
-  return constructor->has_initial_map() &&
-         constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
-         constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
-}
-
-}  // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
-  Node* const target = NodeProperties::GetValueInput(node, 0);
-  Type* const target_type = NodeProperties::GetType(target);
-  Node* const new_target = NodeProperties::GetValueInput(node, 1);
-  Node* const effect = NodeProperties::GetEffectInput(node);
-  // TODO(turbofan): Add support for NewTarget passed to JSCreate.
-  if (target != new_target) return NoChange();
-  // Extract constructor function.
-  if (target_type->IsConstant() &&
-      target_type->AsConstant()->Value()->IsJSFunction()) {
-    Handle<JSFunction> constructor =
-        Handle<JSFunction>::cast(target_type->AsConstant()->Value());
-    DCHECK(constructor->IsConstructor());
-    // Force completion of inobject slack tracking before
-    // generating code to finalize the instance size.
-    constructor->CompleteInobjectSlackTrackingIfActive();
-
-    // TODO(bmeurer): We fall back to the runtime in case we cannot inline
-    // the allocation here, which is sort of expensive. We should think about
-    // a soft fallback to some NewObjectCodeStub.
-    if (IsAllocationInlineable(constructor)) {
-      // Compute instance size from initial map of {constructor}.
-      Handle<Map> initial_map(constructor->initial_map(), isolate());
-      int const instance_size = initial_map->instance_size();
-
-      // Add a dependency on the {initial_map} to make sure that this code is
-      // deoptimized whenever the {initial_map} of the {constructor} changes.
-      dependencies()->AssumeInitialMapCantChange(initial_map);
-
-      // Emit code to allocate the JSObject instance for the {constructor}.
-      AllocationBuilder a(jsgraph(), effect, graph()->start());
-      a.Allocate(instance_size);
-      a.Store(AccessBuilder::ForMap(), initial_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(),
-              jsgraph()->EmptyFixedArrayConstant());
-      a.Store(AccessBuilder::ForJSObjectElements(),
-              jsgraph()->EmptyFixedArrayConstant());
-      for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
-        a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
-                jsgraph()->UndefinedConstant());
-      }
-      a.FinishAndChange(node);
-      return Changed(node);
-    }
-  }
-  return NoChange();
-}
-
-
-namespace {
-
-// Retrieves the frame state holding actual argument values.
-Node* GetArgumentsFrameState(Node* frame_state) {
-  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-  FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
-  return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
-             ? outer_state
-             : frame_state;
-}
-
-}  // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
-  CreateArgumentsParameters const& p = CreateArgumentsParametersOf(node->op());
-  Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
-  // Use the ArgumentsAccessStub for materializing both mapped and unmapped
-  // arguments object, but only for non-inlined (i.e. outermost) frames.
-  if (outer_state->opcode() != IrOpcode::kFrameState) {
-    Isolate* isolate = jsgraph()->isolate();
-    int parameter_count = state_info.parameter_count() - 1;
-    int parameter_offset = parameter_count * kPointerSize;
-    int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
-    Node* parameter_pointer = graph()->NewNode(
-        machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
-        jsgraph()->IntPtrConstant(offset));
-
-    if (p.type() != CreateArgumentsParameters::kRestArray) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
-      Callable callable = CodeFactory::ArgumentsAccess(
-          isolate, unmapped, shared->has_duplicate_parameters());
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate, graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState);
-      const Operator* new_op = common()->Call(desc);
-      Node* stub_code = jsgraph()->HeapConstant(callable.code());
-      node->InsertInput(graph()->zone(), 0, stub_code);
-      node->InsertInput(graph()->zone(), 2,
-                        jsgraph()->Constant(parameter_count));
-      node->InsertInput(graph()->zone(), 3, parameter_pointer);
-      NodeProperties::ChangeOp(node, new_op);
-      return Changed(node);
-    } else {
-      Callable callable = CodeFactory::RestArgumentsAccess(isolate);
-      CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-          isolate, graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState);
-      const Operator* new_op = common()->Call(desc);
-      Node* stub_code = jsgraph()->HeapConstant(callable.code());
-      node->InsertInput(graph()->zone(), 0, stub_code);
-      node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
-      node->InsertInput(graph()->zone(), 2, parameter_pointer);
-      node->InsertInput(graph()->zone(), 3,
-                        jsgraph()->Constant(p.start_index()));
-      NodeProperties::ChangeOp(node, new_op);
-      return Changed(node);
-    }
-  } else if (outer_state->opcode() == IrOpcode::kFrameState) {
-    // Use inline allocation for all mapped arguments objects within inlined
-    // (i.e. non-outermost) frames, independent of the object size.
-    if (p.type() == CreateArgumentsParameters::kMappedArguments) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      Node* const callee = NodeProperties::GetValueInput(node, 0);
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // TODO(mstarzinger): Duplicate parameters are not handled yet.
-      if (shared->has_duplicate_parameters()) return NoChange();
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by arguments object.
-      bool has_aliased_arguments = false;
-      Node* const elements = AllocateAliasedArguments(
-          effect, control, args_state, context, shared, &has_aliased_arguments);
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
-                                    : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the arguments object.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
-      STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
-      a.Allocate(Heap::kSloppyArgumentsObjectSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
-      a.Store(AccessBuilder::ForArgumentsCallee(), callee);
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
-      // Use inline allocation for all unmapped arguments objects within inlined
-      // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by arguments object.
-      Node* const elements = AllocateArguments(effect, control, args_state);
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the arguments object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_arguments_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::STRICT_ARGUMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the arguments object.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-      int length = args_state_info.parameter_count() - 1;  // Minus receiver.
-      STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
-      a.Allocate(Heap::kStrictArgumentsObjectSize);
-      a.Store(AccessBuilder::ForMap(), load_arguments_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    } else if (p.type() == CreateArgumentsParameters::kRestArray) {
-      // Use inline allocation for all rest parameter arrays within inlined
-      // (i.e. non-outermost) frames, independent of the object size.
-      Node* const control = NodeProperties::GetControlInput(node);
-      Node* const context = NodeProperties::GetContextInput(node);
-      Node* effect = NodeProperties::GetEffectInput(node);
-      // Choose the correct frame state and frame state info depending on
-      // whether there conceptually is an arguments adaptor frame in the call
-      // chain.
-      Node* const args_state = GetArgumentsFrameState(frame_state);
-      FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
-      // Prepare element backing store to be used by the rest array.
-      Node* const elements =
-          AllocateRestArguments(effect, control, args_state, p.start_index());
-      effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
-      // Load the JSArray object map from the current native context.
-      Node* const load_native_context = effect = graph()->NewNode(
-          javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
-      Node* const load_jsarray_map = effect = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForContextSlot(
-              Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
-          load_native_context, effect, control);
-      // Actually allocate and initialize the jsarray.
-      AllocationBuilder a(jsgraph(), effect, control);
-      Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
-      // Minus one for the receiver.
-      int argument_count = args_state_info.parameter_count() - 1;
-      int length = std::max(0, argument_count - p.start_index());
-      STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-      a.Allocate(JSArray::kSize);
-      a.Store(AccessBuilder::ForMap(), load_jsarray_map);
-      a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-      a.Store(AccessBuilder::ForJSObjectElements(), elements);
-      a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
-              jsgraph()->Constant(length));
-      RelaxControls(node);
-      a.FinishAndChange(node);
-      return Changed(node);
-    }
-  }
-
-  return NoChange();
-}
-
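A sketch of the caller-SP address arithmetic used above to locate the first argument for the outermost frame. It assumes a 64-bit target; kCallerSPOffset here is an illustrative placeholder, not the real StandardFrameConstants value.

    #include <cstdint>

    constexpr int kPointerSize = 8;                    // assumption: 64-bit
    constexpr int kCallerSPOffset = 2 * kPointerSize;  // placeholder value

    std::intptr_t ParameterPointer(std::intptr_t frame_pointer,
                                   int parameter_count) {
      // parameter_count already excludes the receiver (hence the "- 1" above).
      return frame_pointer + kCallerSPOffset + parameter_count * kPointerSize;
    }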
-
-Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
-                                          int capacity,
-                                          Handle<AllocationSite> site) {
-  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Extract transition and tenuring feedback from the {site} and add
-  // appropriate code dependencies on the {site} if deoptimization is
-  // enabled.
-  PretenureFlag pretenure = site->GetPretenureMode();
-  ElementsKind elements_kind = site->GetElementsKind();
-  DCHECK(IsFastElementsKind(elements_kind));
-  if (flags() & kDeoptimizationEnabled) {
-    dependencies()->AssumeTenuringDecision(site);
-    dependencies()->AssumeTransitionStable(site);
-  }
-
-  // Retrieve the initial map for the array from the appropriate native context.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* js_array_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
-      native_context, native_context, effect);
-
-  // Setup elements and properties.
-  Node* elements;
-  if (capacity == 0) {
-    elements = jsgraph()->EmptyFixedArrayConstant();
-  } else {
-    elements = effect =
-        AllocateElements(effect, control, elements_kind, capacity, pretenure);
-  }
-  Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
-  // Perform the allocation of the actual JSArray object.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.Allocate(JSArray::kSize, pretenure);
-  a.Store(AccessBuilder::ForMap(), js_array_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(), properties);
-  a.Store(AccessBuilder::ForJSObjectElements(), elements);
-  a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
-  CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
-  Node* target = NodeProperties::GetValueInput(node, 0);
-  Node* new_target = NodeProperties::GetValueInput(node, 1);
-
-  // TODO(bmeurer): Optimize the subclassing case.
-  if (target != new_target) return NoChange();
-
-  // Check if we have a feedback {site} on the {node}.
-  Handle<AllocationSite> site = p.site();
-  if (p.site().is_null()) return NoChange();
-
-  // Attempt to inline calls to the Array constructor for the relevant cases
-  // where either no arguments are provided, or exactly one unsigned number
-  // argument is given.
-  if (site->CanInlineCall()) {
-    if (p.arity() == 0) {
-      Node* length = jsgraph()->ZeroConstant();
-      int capacity = JSArray::kPreallocatedArrayElements;
-      return ReduceNewArray(node, length, capacity, site);
-    } else if (p.arity() == 1) {
-      Node* length = NodeProperties::GetValueInput(node, 2);
-      Type* length_type = NodeProperties::GetType(length);
-      if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
-        int capacity = static_cast<int>(length_type->Max());
-        return ReduceNewArray(node, length, capacity, site);
-      }
-    }
-  }
-
-  // Reduce {node} to the appropriate ArrayConstructorStub backend.
-  // Note that these stubs "behave" like JSFunctions, which means they
-  // expect a receiver on the stack, which they remove. We just push
-  // undefined for the receiver.
-  ElementsKind elements_kind = site->GetElementsKind();
-  AllocationSiteOverrideMode override_mode =
-      (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
-          ? DISABLE_ALLOCATION_SITES
-          : DONT_OVERRIDE;
-  if (p.arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
-                                        override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
-        CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  } else if (p.arity() == 1) {
-    // TODO(bmeurer): Optimize for the 0 length non-holey case?
-    ArraySingleArgumentConstructorStub stub(
-        isolate(), GetHoleyElementsKind(elements_kind), override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
-        CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  } else {
-    int const arity = static_cast<int>(p.arity());
-    ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
-                                        override_mode);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
-        arity + 1, CallDescriptor::kNeedsFrameState);
-    node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
-    node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
-    node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
-    node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
-    NodeProperties::ChangeOp(node, common()->Call(desc));
-    return Changed(node);
-  }
-}
-
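A sketch of the capacity inference above: when the single length argument has a type that is a small non-negative integer range, its maximum can serve as a fixed allocation capacity. The length type is modeled as a plain numeric range here; the real check uses the type cache's kElementLoopUnrollType bound.

    struct Range { double min, max; };

    bool InferCapacity(const Range& length_type, double unroll_limit,
                       int* capacity) {
      // Only inline when the length is a small, known, non-negative bound.
      if (length_type.min < 0 || length_type.max > unroll_limit) return false;
      *capacity = static_cast<int>(length_type.max);
      return true;
    }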
-
-Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
-  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-  Handle<SharedFunctionInfo> shared = p.shared_info();
-
-  // Use the FastNewClosureStub that allocates in new space only for nested
-  // functions that don't need literal cloning.
-  if (p.pretenure() == NOT_TENURED && shared->num_literals() == 0) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastNewClosure(
-        isolate, shared->language_mode(), shared->kind());
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 1, jsgraph()->HeapConstant(shared));
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* done = NodeProperties::GetValueInput(node, 1);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-
-  // Load the JSIteratorResult map for the {context}.
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  Node* iterator_result_map = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
-      native_context, native_context, effect);
-
-  // Emit code to allocate the JSIteratorResult instance.
-  AllocationBuilder a(jsgraph(), effect, graph()->start());
-  a.Allocate(JSIteratorResult::kSize);
-  a.Store(AccessBuilder::ForMap(), iterator_result_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(),
-          jsgraph()->EmptyFixedArrayConstant());
-  a.Store(AccessBuilder::ForJSObjectElements(),
-          jsgraph()->EmptyFixedArrayConstant());
-  a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
-  a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
-  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
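The object layout implied by the stores and the STATIC_ASSERT above: five tagged, pointer-sized fields. This is a sketch, not the real class definition.

    struct JSIteratorResultLayout {
      void* map;         // slot 0: map loaded from the native context
      void* properties;  // slot 1: empty fixed array
      void* elements;    // slot 2: empty fixed array
      void* value;       // slot 3: the iterator result's "value"
      void* done;        // slot 4: the iterator result's "done" flag
    };
    static_assert(sizeof(JSIteratorResultLayout) == 5 * sizeof(void*),
                  "matches JSIteratorResult::kSize for a given word size");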
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
-  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
-  int const length = constants->length();
-  int const flags = p.flags();
-
-  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
-  // initial length limit for arrays with "fast" elements kind.
-  // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
-  if ((flags & ArrayLiteral::kShallowElements) != 0 &&
-      (flags & ArrayLiteral::kIsStrong) == 0 &&
-      length < JSArray::kInitialMaxFastElementArray) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastCloneShallowArray(isolate);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
-            ? CallDescriptor::kNeedsFrameState
-            : CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    Node* literal_index = jsgraph()->SmiConstant(p.index());
-    Node* constant_elements = jsgraph()->HeapConstant(constants);
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 2, literal_index);
-    node->InsertInput(graph()->zone(), 3, constant_elements);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
-  CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
-  Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
-  // Constants are pairs, see ObjectLiteral::properties_count().
-  int const length = constants->length() / 2;
-  int const flags = p.flags();
-
-  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
-  // elements up to the number of properties that the stubs can handle.
-  if ((flags & ObjectLiteral::kShallowProperties) != 0 &&
-      length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastCloneShallowObject(isolate, length);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
-            ? CallDescriptor::kNeedsFrameState
-            : CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    Node* literal_index = jsgraph()->SmiConstant(p.index());
-    Node* literal_flags = jsgraph()->SmiConstant(flags);
-    Node* constant_elements = jsgraph()->HeapConstant(constants);
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    node->InsertInput(graph()->zone(), 2, literal_index);
-    node->InsertInput(graph()->zone(), 3, constant_elements);
-    node->InsertInput(graph()->zone(), 4, literal_flags);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
-  int slot_count = OpParameter<int>(node->op());
-  Node* const closure = NodeProperties::GetValueInput(node, 0);
-
-  // Use inline allocation for function contexts up to a size limit.
-  if (slot_count < kFunctionContextAllocationLimit) {
-    // JSCreateFunctionContext[slot_count < limit](fun)
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* control = NodeProperties::GetControlInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* extension = jsgraph()->TheHoleConstant();
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    AllocationBuilder a(jsgraph(), effect, control);
-    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-    int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
-    a.AllocateArray(context_length, factory()->function_context_map());
-    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
-    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
-      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
-    }
-    RelaxControls(node);
-    a.FinishAndChange(node);
-    return Changed(node);
-  }
-
-  // Use the FastNewContextStub only for function contexts up to the maximum size.
-  if (slot_count <= FastNewContextStub::kMaximumSlots) {
-    Isolate* isolate = jsgraph()->isolate();
-    Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate, graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNoFlags);
-    const Operator* new_op = common()->Call(desc);
-    Node* stub_code = jsgraph()->HeapConstant(callable.code());
-    node->InsertInput(graph()->zone(), 0, stub_code);
-    NodeProperties::ChangeOp(node, new_op);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
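A sketch of the fixed context header the four stores above populate, matching the MIN_CONTEXT_SLOTS == 4 assertion; slots past the header are filled with undefined. The with- and catch-context reductions below fill the same header, differing only in what goes into the extension slot (and, for catch contexts, one extra slot for the thrown object).

    enum ContextHeaderSlot {
      kClosureSlot = 0,        // the function that created the context
      kPreviousSlot = 1,       // the enclosing context
      kExtensionSlot = 2,      // the-hole / with-object / ScopeInfo, by kind
      kNativeContextSlot = 3,  // back pointer to the native context
      kMinContextSlots = 4     // user-visible slots start here
    };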
-
-Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
-  Node* object = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-  a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
-  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
-  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
-  Handle<String> name = OpParameter<Handle<String>>(node);
-  Node* exception = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* native_context = effect = graph()->NewNode(
-      javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-      context, context, effect);
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-  a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
-                  factory()->catch_context_map());
-  a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-  a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-  a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
-  a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-          native_context);
-  a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
-          exception);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
-  Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
-  int context_length = scope_info->ContextLength();
-  Node* const closure = NodeProperties::GetValueInput(node, 0);
-
-  // Use inline allocation for block contexts up to a size limit.
-  if (context_length < kBlockContextAllocationLimit) {
-    // JSCreateBlockContext[scope[length < limit]](fun)
-    Node* effect = NodeProperties::GetEffectInput(node);
-    Node* control = NodeProperties::GetControlInput(node);
-    Node* context = NodeProperties::GetContextInput(node);
-    Node* extension = jsgraph()->Constant(scope_info);
-    Node* native_context = effect = graph()->NewNode(
-        javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-        context, context, effect);
-    AllocationBuilder a(jsgraph(), effect, control);
-    STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
-    a.AllocateArray(context_length, factory()->block_context_map());
-    a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
-    a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
-    a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
-    a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
-            native_context);
-    for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
-      a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
-    }
-    RelaxControls(node);
-    a.FinishAndChange(node);
-    return Changed(node);
-  }
-
-  return NoChange();
-}
-
-
 Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
   CallConstructParameters const& p = CallConstructParametersOf(node->op());
@@ -2252,9 +1496,8 @@
   // Maybe we did at least learn something about the {receiver}.
   if (p.convert_mode() != convert_mode) {
     NodeProperties::ChangeOp(
-        node,
-        javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
-                                   convert_mode, p.tail_call_mode()));
+        node, javascript()->CallFunction(p.arity(), p.feedback(), convert_mode,
+                                         p.tail_call_mode()));
     return Changed(node);
   }
 
@@ -2270,159 +1513,6 @@
 }
 
 
-Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
-  Node* receiver = NodeProperties::GetValueInput(node, 0);
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Get the set of properties to enumerate.
-  Node* cache_type = effect = graph()->NewNode(
-      javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), receiver,
-      context, frame_state, effect, control);
-  control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
-  Node* receiver_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       receiver, effect, control);
-  Node* cache_type_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       cache_type, effect, control);
-  Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
-
-  // If we got a map from the GetPropertyNamesFast runtime call, we can do a
-  // fast modification check. Otherwise, we got a fixed array, and we have to
-  // perform a slow check on every iteration.
-  Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                                  cache_type_map, meta_map);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* cache_array_true0;
-  Node* cache_length_true0;
-  Node* cache_type_true0;
-  Node* etrue0;
-  {
-    // Enum cache case.
-    Node* cache_type_enum_length = etrue0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
-        effect, if_true0);
-    cache_length_true0 = graph()->NewNode(
-        simplified()->NumberBitwiseAnd(), cache_type_enum_length,
-        jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
-
-    Node* check1 =
-        graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
-                         jsgraph()->Int32Constant(0));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* cache_array_true1;
-    Node* etrue1;
-    {
-      // No properties to enumerate.
-      cache_array_true1 =
-          jsgraph()->HeapConstant(factory()->empty_fixed_array());
-      etrue1 = etrue0;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* cache_array_false1;
-    Node* efalse1;
-    {
-      // Load the enumeration cache from the instance descriptors of {receiver}.
-      Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
-          receiver_map, etrue0, if_false1);
-      Node* object_map_enum_cache = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
-          receiver_map_descriptors, efalse1, if_false1);
-      cache_array_false1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(
-              AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
-          object_map_enum_cache, efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    cache_array_true0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                         cache_array_true1, cache_array_false1, if_true0);
-
-    cache_type_true0 = cache_type;
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* cache_array_false0;
-  Node* cache_length_false0;
-  Node* cache_type_false0;
-  Node* efalse0;
-  {
-    // FixedArray case.
-    cache_type_false0 = jsgraph()->OneConstant();  // Smi means slow check
-    cache_array_false0 = cache_type;
-    cache_length_false0 = efalse0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
-        cache_array_false0, effect, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* cache_array =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_array_true0, cache_array_false0, control);
-  Node* cache_length =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_length_true0, cache_length_false0, control);
-  cache_type =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                       cache_type_true0, cache_type_false0, control);
-
-  for (auto edge : node->use_edges()) {
-    Node* const use = edge.from();
-    if (NodeProperties::IsEffectEdge(edge)) {
-      edge.UpdateTo(effect);
-      Revisit(use);
-    } else {
-      if (NodeProperties::IsControlEdge(edge)) {
-        if (use->opcode() == IrOpcode::kIfSuccess) {
-          Replace(use, control);
-        } else if (use->opcode() == IrOpcode::kIfException) {
-          edge.UpdateTo(cache_type_true0);
-          continue;
-        } else {
-          UNREACHABLE();
-        }
-      } else {
-        DCHECK(NodeProperties::IsValueEdge(edge));
-        DCHECK_EQ(IrOpcode::kProjection, use->opcode());
-        switch (ProjectionIndexOf(use->op())) {
-          case 0:
-            Replace(use, cache_type);
-            break;
-          case 1:
-            Replace(use, cache_array);
-            break;
-          case 2:
-            Replace(use, cache_length);
-            break;
-          default:
-            UNREACHABLE();
-            break;
-        }
-      }
-      use->Kill();
-    }
-  }
-  return NoChange();  // All uses were replaced already above.
-}
-
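A sketch of the enum-length extraction in the enum-cache fast path above: the cached enumeration length lives in the low bits of the map's bit_field3, and zero means there is nothing cached to enumerate. The mask width is an assumption; the real layout comes from Map::EnumLengthBits.

    #include <cstdint>

    constexpr uint32_t kEnumLengthMask = (1u << 10) - 1;  // assumed 10-bit field

    uint32_t EnumLength(uint32_t bit_field3) {
      // Zero means the enum cache holds no properties to enumerate.
      return bit_field3 & kEnumLengthMask;
    }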
-
 Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
   Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -2464,38 +1554,12 @@
   Node* efalse0;
   Node* vfalse0;
   {
-    // Check if the {cache_type} is zero, which indicates proxy.
-    Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
-                                    cache_type, jsgraph()->ZeroConstant());
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1;
-    Node* vtrue1;
-    {
-      // Don't do filtering for proxies.
-      etrue1 = effect;
-      vtrue1 = key;
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1;
-    Node* vfalse1;
-    {
-      // Filter the {key} to check if it's still a valid property of the
-      // {receiver} (does the ToName conversion implicitly).
-      vfalse1 = efalse1 = graph()->NewNode(
-          javascript()->CallRuntime(Runtime::kForInFilter, 2), receiver, key,
-          context, frame_state, effect, if_false1);
-      if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue1, vfalse1, if_false0);
+    // Filter the {key} to check if it's still a valid property of the
+    // {receiver} (does the ToName conversion implicitly).
+    vfalse0 = efalse0 = graph()->NewNode(
+        javascript()->CallRuntime(Runtime::kForInFilter), receiver, key,
+        context, frame_state, effect, if_false0);
+    if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
   }
 
   control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@@ -2640,28 +1704,6 @@
       return ReduceJSStoreContext(node);
     case IrOpcode::kJSConvertReceiver:
       return ReduceJSConvertReceiver(node);
-    case IrOpcode::kJSCreate:
-      return ReduceJSCreate(node);
-    case IrOpcode::kJSCreateArguments:
-      return ReduceJSCreateArguments(node);
-    case IrOpcode::kJSCreateArray:
-      return ReduceJSCreateArray(node);
-    case IrOpcode::kJSCreateClosure:
-      return ReduceJSCreateClosure(node);
-    case IrOpcode::kJSCreateIterResultObject:
-      return ReduceJSCreateIterResultObject(node);
-    case IrOpcode::kJSCreateLiteralArray:
-      return ReduceJSCreateLiteralArray(node);
-    case IrOpcode::kJSCreateLiteralObject:
-      return ReduceJSCreateLiteralObject(node);
-    case IrOpcode::kJSCreateFunctionContext:
-      return ReduceJSCreateFunctionContext(node);
-    case IrOpcode::kJSCreateWithContext:
-      return ReduceJSCreateWithContext(node);
-    case IrOpcode::kJSCreateCatchContext:
-      return ReduceJSCreateCatchContext(node);
-    case IrOpcode::kJSCreateBlockContext:
-      return ReduceJSCreateBlockContext(node);
     case IrOpcode::kJSCallConstruct:
       return ReduceJSCallConstruct(node);
     case IrOpcode::kJSCallFunction:
@@ -2670,8 +1712,6 @@
       return ReduceJSForInDone(node);
     case IrOpcode::kJSForInNext:
       return ReduceJSForInNext(node);
-    case IrOpcode::kJSForInPrepare:
-      return ReduceJSForInPrepare(node);
     case IrOpcode::kJSForInStep:
       return ReduceJSForInStep(node);
     case IrOpcode::kSelect:
@@ -2690,139 +1730,6 @@
 }
 
 
-// Helper that allocates a FixedArray holding argument values recorded in the
-// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
-                                         Node* frame_state) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < argument_count; ++i, ++parameters_it) {
-    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
-  }
-  return a.Finish();
-}
-
-
-// Helper that allocates a FixedArray holding the argument values recorded in
-// the given {frame_state} from {start_index} onwards. Serves as backing store
-// for rest arguments of JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
-                                             Node* frame_state,
-                                             int start_index) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  int num_elements = std::max(0, argument_count - start_index);
-  if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
-  // Skip unused arguments.
-  for (int i = 0; i < start_index; i++) {
-    ++parameters_it;
-  }
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(num_elements, factory()->fixed_array_map());
-  for (int i = 0; i < num_elements; ++i, ++parameters_it) {
-    a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
-  }
-  return a.Finish();
-}
-
-
-// Helper that allocates a FixedArray serving as a parameter map for values
-// recorded in the given {frame_state}. Some elements map to slots within the
-// given {context}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateAliasedArguments(
-    Node* effect, Node* control, Node* frame_state, Node* context,
-    Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
-  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-  int argument_count = state_info.parameter_count() - 1;  // Minus receiver.
-  if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
-  // If there is no aliasing, the arguments object elements are not special in
-  // any way, so we can just return an unmapped backing store instead.
-  int parameter_count = shared->internal_formal_parameter_count();
-  if (parameter_count == 0) {
-    return AllocateArguments(effect, control, frame_state);
-  }
-
-  // Calculate number of argument values being aliased/mapped.
-  int mapped_count = Min(argument_count, parameter_count);
-  *has_aliased_arguments = true;
-
-  // Prepare an iterator over argument values recorded in the frame state.
-  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-  StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
-  // The unmapped argument values recorded in the frame state are stored yet
-  // another indirection away and then linked into the parameter map below,
-  // whereas mapped argument values are replaced with a hole instead.
-  AllocationBuilder aa(jsgraph(), effect, control);
-  aa.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
-  }
-  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
-  }
-  Node* arguments = aa.Finish();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), arguments, control);
-  a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
-  a.Store(AccessBuilder::ForFixedArraySlot(0), context);
-  a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
-  for (int i = 0; i < mapped_count; ++i) {
-    int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
-    a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
-  }
-  return a.Finish();
-}
-
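A sketch of the slot-index computation used above when building the sloppy-arguments parameter map: entry i of the map points at the context slot holding formal parameter i, and the formals are laid out in reverse order after the fixed context header.

    int MappedContextSlot(int i, int parameter_count, int min_context_slots) {
      return min_context_slots + parameter_count - 1 - i;
    }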
-
-Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
-                                        ElementsKind elements_kind,
-                                        int capacity, PretenureFlag pretenure) {
-  DCHECK_LE(1, capacity);
-  DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
-
-  Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
-                                 ? factory()->fixed_double_array_map()
-                                 : factory()->fixed_array_map();
-  ElementAccess access = IsFastDoubleElementsKind(elements_kind)
-                             ? AccessBuilder::ForFixedDoubleArrayElement()
-                             : AccessBuilder::ForFixedArrayElement();
-  Node* value =
-      IsFastDoubleElementsKind(elements_kind)
-          ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
-          : jsgraph()->TheHoleConstant();
-
-  // Actually allocate the backing store.
-  AllocationBuilder a(jsgraph(), effect, control);
-  a.AllocateArray(capacity, elements_map, pretenure);
-  for (int i = 0; i < capacity; ++i) {
-    Node* index = jsgraph()->Constant(i);
-    a.Store(access, index, value);
-  }
-  return a.Finish();
-}
-
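A sketch of why double-element backing stores are filled with a NaN pattern rather than a tagged hole: FixedDoubleArray slots hold raw doubles, so "the hole" must be encoded as a reserved NaN bit pattern (kHoleNanInt64 above). std::memcpy stands in for bit_cast; the actual bit value is defined elsewhere in the tree.

    #include <cstdint>
    #include <cstring>

    double HoleNaN(uint64_t hole_nan_bits) {
      double hole;
      std::memcpy(&hole, &hole_nan_bits, sizeof hole);  // bit_cast equivalent
      return hole;
    }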
-
 Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
 
 
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 68ce74e..4621a45 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -68,41 +68,18 @@
   Reduction ReduceJSToString(Node* node);
   Reduction ReduceJSToObject(Node* node);
   Reduction ReduceJSConvertReceiver(Node* node);
-  Reduction ReduceJSCreate(Node* node);
-  Reduction ReduceJSCreateArguments(Node* node);
-  Reduction ReduceJSCreateArray(Node* node);
-  Reduction ReduceJSCreateClosure(Node* node);
-  Reduction ReduceJSCreateIterResultObject(Node* node);
-  Reduction ReduceJSCreateLiteralArray(Node* node);
-  Reduction ReduceJSCreateLiteralObject(Node* node);
-  Reduction ReduceJSCreateFunctionContext(Node* node);
-  Reduction ReduceJSCreateWithContext(Node* node);
-  Reduction ReduceJSCreateCatchContext(Node* node);
-  Reduction ReduceJSCreateBlockContext(Node* node);
   Reduction ReduceJSCallConstruct(Node* node);
   Reduction ReduceJSCallFunction(Node* node);
   Reduction ReduceJSForInDone(Node* node);
   Reduction ReduceJSForInNext(Node* node);
-  Reduction ReduceJSForInPrepare(Node* node);
   Reduction ReduceJSForInStep(Node* node);
   Reduction ReduceSelect(Node* node);
   Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
   Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
   Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
                             const Operator* shift_op);
-  Reduction ReduceNewArray(Node* node, Node* length, int capacity,
-                           Handle<AllocationSite> site);
 
   Node* Word32Shl(Node* const lhs, int32_t const rhs);
-  Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
-  Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
-                              int start_index);
-  Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
-                                 Node* context, Handle<SharedFunctionInfo>,
-                                 bool* has_aliased_arguments);
-  Node* AllocateElements(Node* effect, Node* control,
-                         ElementsKind elements_kind, int capacity,
-                         PretenureFlag pretenure);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -115,10 +92,6 @@
   CompilationDependencies* dependencies() const;
   Flags flags() const { return flags_; }
 
-  // Limits up to which context allocations are inlined.
-  static const int kFunctionContextAllocationLimit = 16;
-  static const int kBlockContextAllocationLimit = 16;
-
   CompilationDependencies* dependencies_;
   Flags flags_;
   JSGraph* jsgraph_;
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index 7b53b5c..5abd346 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -53,10 +53,10 @@
   RpoNumber onstack() { return RpoNumber::FromInt(-2); }
 };
 
-
 bool JumpThreading::ComputeForwarding(Zone* local_zone,
                                       ZoneVector<RpoNumber>& result,
-                                      InstructionSequence* code) {
+                                      InstructionSequence* code,
+                                      bool frame_at_start) {
   ZoneStack<RpoNumber> stack(local_zone);
   JumpThreadingState state = {false, result, stack};
   state.Clear(code->InstructionBlockCount());
@@ -91,7 +91,14 @@
         } else if (instr->arch_opcode() == kArchJmp) {
           // try to forward the jump instruction.
           TRACE("  jmp\n");
-          fw = code->InputRpo(instr, 0);
+          // if this block deconstructs the frame, we can't forward it.
+          // TODO(mtrofin): we can still forward if we end up building
+          // the frame at start. So we should move the decision of whether
+          // to build a frame or not in the register allocator, and trickle it
+          // here and to the code generator.
+          if (frame_at_start || !block->must_deconstruct_frame()) {
+            fw = code->InputRpo(instr, 0);
+          }
           fallthru = false;
         } else {
           // can't skip other instructions.
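The guard added above, as a standalone predicate: a jump out of a block that deconstructs the frame can only be threaded through when the frame is built at the start anyway; otherwise the skipped block's frame deconstruction would be lost.

    bool CanForwardJump(bool frame_at_start, bool must_deconstruct_frame) {
      return frame_at_start || !must_deconstruct_frame;
    }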
diff --git a/src/compiler/jump-threading.h b/src/compiler/jump-threading.h
index fa74ee9..84520ba 100644
--- a/src/compiler/jump-threading.h
+++ b/src/compiler/jump-threading.h
@@ -18,7 +18,7 @@
   // Compute the forwarding map of basic blocks to their ultimate destination.
   // Returns {true} if there is at least one block that is forwarded.
   static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
-                                InstructionSequence* code);
+                                InstructionSequence* code, bool frame_at_start);
 
   // Rewrite the instructions to forward jumps and branches.
   // May also negate some branches.
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 2eef929..d4a3665 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -63,9 +63,6 @@
     case CallDescriptor::kCallAddress:
       os << "Addr";
       break;
-    case CallDescriptor::kLazyBailout:
-      os << "LazyBail";
-      break;
   }
   return os;
 }
@@ -120,14 +117,7 @@
 
 
 CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
-  if (info->code_stub() != nullptr) {
-    // Use the code stub interface descriptor.
-    CodeStub* stub = info->code_stub();
-    CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
-    return GetStubCallDescriptor(
-        info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
-        CallDescriptor::kNoFlags, Operator::kNoProperties);
-  }
+  DCHECK(!info->IsStub());
   if (info->has_literal()) {
     // If we already have the function literal, use the number of parameters
     // plus the receiver.
@@ -155,13 +145,14 @@
   switch (function) {
     case Runtime::kAllocateInTargetSpace:
     case Runtime::kCreateIterResultObject:
-    case Runtime::kDefineClassMethod:              // TODO(jarin): Is it safe?
+    case Runtime::kDefineDataPropertyInLiteral:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kFinalizeClassDefinition:        // TODO(conradw): Is it safe?
     case Runtime::kForInDone:
     case Runtime::kForInStep:
     case Runtime::kGetSuperConstructor:
+    case Runtime::kIsFunction:
     case Runtime::kNewClosure:
     case Runtime::kNewClosure_Tenured:
     case Runtime::kNewFunctionContext:
@@ -174,8 +165,6 @@
     case Runtime::kTraceEnter:
     case Runtime::kTraceExit:
       return 0;
-    case Runtime::kInlineArguments:
-    case Runtime::kInlineArgumentsLength:
     case Runtime::kInlineGetPrototype:
     case Runtime::kInlineRegExpConstructResult:
     case Runtime::kInlineRegExpExec:
@@ -242,6 +231,9 @@
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(MachineType::AnyTagged());
   }
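A sketch of the return-location cascade above, now extended to a third value: each return past the first is assigned the next fixed return register in order. Plain ints stand in for the per-architecture kReturnRegister0..2 definitions.

    #include <cstddef>
    #include <vector>

    void AddReturnLocations(size_t return_count, std::vector<int>* locations) {
      const int kReturnRegs[] = {0, 1, 2};  // placeholders for kReturnRegister0..2
      for (size_t i = 0; i < return_count && i < 3; ++i) {
        locations->push_back(kReturnRegs[i]);
      }
    }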
@@ -287,31 +279,6 @@
 }
 
 
-CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
-  const size_t return_count = 0;
-  const size_t parameter_count = 0;
-
-  LocationSignature::Builder locations(zone, return_count, parameter_count);
-  MachineSignature::Builder types(zone, return_count, parameter_count);
-
-  // The target is ignored, but we need to give some values here.
-  MachineType target_type = MachineType::AnyTagged();
-  LinkageLocation target_loc = regloc(kJSFunctionRegister);
-  return new (zone) CallDescriptor(      // --
-      CallDescriptor::kLazyBailout,      // kind
-      target_type,                       // target MachineType
-      target_loc,                        // target location
-      types.Build(),                     // machine_sig
-      locations.Build(),                 // location_sig
-      0,                                 // stack_parameter_count
-      Operator::kNoThrow,                // properties
-      kNoCalleeSaved,                    // callee-saved
-      kNoCalleeSaved,                    // callee-saved fp
-      CallDescriptor::kNeedsFrameState,  // flags
-      "lazy-bailout");
-}
-
-
 CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
                                              int js_parameter_count,
                                              CallDescriptor::Flags flags) {
@@ -350,10 +317,10 @@
 
   // The target for JS function calls is the JSFunction object.
   MachineType target_type = MachineType::AnyTagged();
-  // TODO(titzer): When entering into an OSR function from unoptimized code,
-  // the JSFunction is not in a register, but it is on the stack in an
-  // unaddressable spill slot. We hack this in the OSR prologue. Fix.
-  LinkageLocation target_loc = regloc(kJSFunctionRegister);
+  // When entering an OSR function from unoptimized code, the JSFunction is
+  // not in a register, but on the stack in the marker spill slot.
+  LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
+                                      : regloc(kJSFunctionRegister);
   return new (zone) CallDescriptor(     // --
       CallDescriptor::kCallJSFunction,  // kind
       target_type,                      // target MachineType
@@ -369,60 +336,6 @@
       "js-call");
 }
 
-
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
-  MachineSignature::Builder types(zone, 0, 6);
-  LocationSignature::Builder locations(zone, 0, 6);
-
-  // Add registers for fixed parameters passed via interpreter dispatch.
-  STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kInterpreterAccumulatorRegister));
-
-  STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
-  types.AddParam(MachineType::Pointer());
-  locations.AddParam(regloc(kInterpreterRegisterFileRegister));
-
-  STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
-  types.AddParam(MachineType::IntPtr());
-  locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
-
-  STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
-
-  STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
-  types.AddParam(MachineType::Pointer());
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
-  // TODO(rmcilroy): Make the context param the one spilled to the stack once
-  // Turbofan supports modified stack arguments in tail calls.
-  locations.AddParam(
-      LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
-#else
-  locations.AddParam(regloc(kInterpreterDispatchTableRegister));
-#endif
-
-  STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
-  types.AddParam(MachineType::AnyTagged());
-  locations.AddParam(regloc(kContextRegister));
-
-  LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
-  return new (zone) CallDescriptor(         // --
-      CallDescriptor::kCallCodeObject,      // kind
-      MachineType::None(),                  // target MachineType
-      target_loc,                           // target location
-      types.Build(),                        // machine_sig
-      locations.Build(),                    // location_sig
-      0,                                    // stack_parameter_count
-      Operator::kNoProperties,              // properties
-      kNoCalleeSaved,                       // callee-saved registers
-      kNoCalleeSaved,                       // callee-saved fp regs
-      CallDescriptor::kSupportsTailCalls |  // flags
-          CallDescriptor::kCanUseRoots,     // flags
-      "interpreter-dispatch");
-}
-
-
 // TODO(all): Add support for return representations/locations to
 // CallInterfaceDescriptor.
 // TODO(turbofan): cache call descriptors for code stub calls.
@@ -448,6 +361,9 @@
   if (locations.return_count_ > 1) {
     locations.AddReturn(regloc(kReturnRegister1));
   }
+  if (locations.return_count_ > 2) {
+    locations.AddReturn(regloc(kReturnRegister2));
+  }
   for (size_t i = 0; i < return_count; i++) {
     types.AddReturn(return_type);
   }
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 252f044..3012f56 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -76,6 +76,12 @@
                               kPointerSize);
   }
 
+  static LinkageLocation ForSavedCallerMarker() {
+    return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+                               StandardFrameConstants::kMarkerOffset) /
+                              kPointerSize);
+  }
+
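A sketch of the slot arithmetic in ForSavedCallerMarker above: the marker slot's index, in pointers, is the distance between the caller-PC slot and the marker slot. The offsets are illustrative placeholders, not the real StandardFrameConstants values.

    constexpr int kPtrSize = 8;                    // assumption: 64-bit
    constexpr int kCallerPCOffset = 1 * kPtrSize;  // placeholder
    constexpr int kMarkerOffset = -2 * kPtrSize;   // placeholder

    constexpr int kSavedCallerMarkerSlot =
        (kCallerPCOffset - kMarkerOffset) / kPtrSize;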
   static LinkageLocation ConvertToTailCallerLocation(
       LinkageLocation caller_location, int stack_param_delta) {
     if (!caller_location.IsRegister()) {
@@ -140,8 +146,7 @@
   enum Kind {
     kCallCodeObject,  // target is a Code object
     kCallJSFunction,  // target is a JSFunction object
-    kCallAddress,     // target is a machine pointer
-    kLazyBailout      // the call is no-op, only used for lazy bailout
+    kCallAddress      // target is a machine pointer
   };
 
   enum Flag {
@@ -153,9 +158,12 @@
     kHasLocalCatchHandler = 1u << 4,
     kSupportsTailCalls = 1u << 5,
     kCanUseRoots = 1u << 6,
-    // Indicates that the native stack should be used for a code object. This
-    // information is important for native calls on arm64.
+    // (arm64 only) native stack should be used for arguments.
     kUseNativeStack = 1u << 7,
+    // (arm64 only) call instruction has to restore JSSP.
+    kRestoreJSSP = 1u << 8,
+    // Causes the code generator to initialize the root register.
+    kInitializeRootRegister = 1u << 9,
     kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
   };
   typedef base::Flags<Flag> Flags;
@@ -222,6 +230,9 @@
   bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
   bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
   bool UseNativeStack() const { return flags() & kUseNativeStack; }
+  bool InitializeRootRegister() const {
+    return flags() & kInitializeRootRegister;
+  }
 
   LinkageLocation GetReturnLocation(size_t index) const {
     return location_sig_->GetReturn(index);
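
The new accessor follows the existing mask-and-test idiom; a minimal
self-contained sketch of that pattern (illustrative only, not V8's
base::Flags machinery):

#include <cstdint>

class DescriptorSketch {
 public:
  enum Flag : uint32_t {
    kUseNativeStack = 1u << 7,
    kRestoreJSSP = 1u << 8,
    kInitializeRootRegister = 1u << 9,
  };
  explicit DescriptorSketch(uint32_t flags) : flags_(flags) {}
  uint32_t flags() const { return flags_; }
  bool InitializeRootRegister() const {
    return (flags() & kInitializeRootRegister) != 0;
  }

 private:
  const uint32_t flags_;
};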
@@ -313,8 +324,6 @@
       Zone* zone, Runtime::FunctionId function, int parameter_count,
       Operator::Properties properties, CallDescriptor::Flags flags);
 
-  static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
-
   static CallDescriptor* GetStubCallDescriptor(
       Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
       int stack_parameter_count, CallDescriptor::Flags flags,
@@ -326,13 +335,9 @@
   // for the host platform. This simplified calling convention only supports
   // integers and pointers of one word size each, i.e. no floating point,
   // structs, pointers to members, etc.
-  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
-                                                  const MachineSignature* sig);
-
-  // Creates a call descriptor for interpreter handler code stubs. These are not
-  // intended to be called directly but are instead dispatched to by the
-  // interpreter.
-  static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
+  static CallDescriptor* GetSimplifiedCDescriptor(
+      Zone* zone, const MachineSignature* sig,
+      bool set_initialize_root_flag = false);
 
   // Get the location of an (incoming) parameter to this function.
   LinkageLocation GetParameterLocation(int index) const {
@@ -383,15 +388,6 @@
   // A special {OsrValue} index to indicate the context spill slot.
   static const int kOsrContextSpillSlotIndex = -1;
 
-  // Special parameter indices used to pass fixed register data through
-  // interpreter dispatches.
-  static const int kInterpreterAccumulatorParameter = 0;
-  static const int kInterpreterRegisterFileParameter = 1;
-  static const int kInterpreterBytecodeOffsetParameter = 2;
-  static const int kInterpreterBytecodeArrayParameter = 3;
-  static const int kInterpreterDispatchTableParameter = 4;
-  static const int kInterpreterContextParameter = 5;
-
  private:
   CallDescriptor* const incoming_;
 
diff --git a/src/compiler/live-range-separator.cc b/src/compiler/live-range-separator.cc
index 980c944..e3cd0a3 100644
--- a/src/compiler/live-range-separator.cc
+++ b/src/compiler/live-range-separator.cc
@@ -119,8 +119,10 @@
 
 
 void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+  const InstructionSequence *code = data()->code();
   for (TopLevelLiveRange *top : data()->live_ranges()) {
-    if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+    if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
+        top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
       continue;
     }
 
@@ -131,7 +133,10 @@
         break;
       }
     }
-    if (child == nullptr) top->MarkSpilledInDeferredBlock();
+    if (child == nullptr) {
+      top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
+                                         code->InstructionBlockCount());
+    }
   }
 }
 
diff --git a/src/compiler/liveness-analyzer.h b/src/compiler/liveness-analyzer.h
index 1e2f85b..9b09724 100644
--- a/src/compiler/liveness-analyzer.h
+++ b/src/compiler/liveness-analyzer.h
@@ -85,6 +85,10 @@
   void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
   void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
   void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
+  LivenessAnalyzerBlock* GetPredecessor() {
+    DCHECK(predecessors_.size() == 1);
+    return predecessors_[0];
+  }
 
  private:
   class Entry {
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 511a10d..3b6f21b 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -91,6 +91,10 @@
   return OpParameter<CheckedStoreRepresentation>(op);
 }
 
+MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
+  return OpParameter<MachineRepresentation>(op);
+}
 
 #define PURE_OP_LIST(V)                                                       \
   V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
@@ -144,13 +148,17 @@
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)                 \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1)                  \
+  V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1)                 \
+  V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1)                \
   V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2)              \
   V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2)              \
   V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2)             \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
+  V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1)                    \
   V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1)                    \
+  V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
   V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1)                   \
   V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1)                   \
   V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1)                     \
@@ -186,11 +194,14 @@
   V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1)                 \
   V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1)                \
   V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)                       \
-  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
+  V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)                       \
+  V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
   V(Word64Ctz, Operator::kNoProperties, 1, 0, 1)            \
+  V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
+  V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1)    \
   V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1)         \
   V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1)         \
   V(Float32Max, Operator::kNoProperties, 2, 0, 1)           \
@@ -207,10 +218,10 @@
   V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
   V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
 
-
 #define MACHINE_TYPE_LIST(V) \
   V(Float32)                 \
   V(Float64)                 \
+  V(Simd128)                 \
   V(Int8)                    \
   V(Uint8)                   \
   V(Int16)                   \
@@ -222,17 +233,16 @@
   V(Pointer)                 \
   V(AnyTagged)
 
-
 #define MACHINE_REPRESENTATION_LIST(V) \
   V(kFloat32)                          \
   V(kFloat64)                          \
+  V(kSimd128)                          \
   V(kWord8)                            \
   V(kWord16)                           \
   V(kWord32)                           \
   V(kWord64)                           \
   V(kTagged)
 
-
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count,         \
              output_count)                                                     \
@@ -279,6 +289,18 @@
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
+#define STACKSLOT(Type)                                                       \
+  struct StackSlot##Type##Operator final                                      \
+      : public Operator1<MachineRepresentation> {                             \
+    StackSlot##Type##Operator()                                               \
+        : Operator1<MachineRepresentation>(                                   \
+              IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
+              1, 0, 0, MachineType::Type().representation()) {}               \
+  };                                                                          \
+  StackSlot##Type##Operator kStackSlot##Type;
+  MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+
 #define STORE(Type)                                                            \
   struct Store##Type##Operator : public Operator1<StoreRepresentation> {       \
     explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)        \
@@ -379,6 +401,16 @@
   return nullptr;
 }
 
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+#define STACKSLOT(Type)                              \
+  if (rep == MachineType::Type().representation()) { \
+    return &cache_.kStackSlot##Type;                 \
+  }
+  MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+  UNREACHABLE();
+  return nullptr;
+}
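
Expanded by hand for one type, STACKSLOT above produces a cached operator
like the following, and StackSlot() simply returns the cached instance
whose representation matches (a sketch of the macro expansion, modulo
formatting):

// Hand-expansion of STACKSLOT(Float64).
struct StackSlotFloat64Operator final
    : public Operator1<MachineRepresentation> {
  StackSlotFloat64Operator()
      : Operator1<MachineRepresentation>(
            IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0,
            1, 0, 0, MachineType::Float64().representation()) {}
};
StackSlotFloat64Operator kStackSlotFloat64;

So machine()->StackSlot(MachineRepresentation::kFloat64) hands back
&cache_.kStackSlotFloat64 without allocating.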
 
 const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
   switch (store_rep.representation()) {
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 00fefe3..c5a80aa 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -102,6 +102,7 @@
 
 CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
 
+MachineRepresentation StackSlotRepresentationOf(Operator const* op);
 
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
@@ -134,12 +135,15 @@
     kWord64Ctz = 1u << 17,
     kWord32Popcnt = 1u << 18,
     kWord64Popcnt = 1u << 19,
+    kWord32ReverseBits = 1u << 20,
+    kWord64ReverseBits = 1u << 21,
     kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
                       kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
                       kFloat64RoundUp | kFloat32RoundTruncate |
                       kFloat64RoundTruncate | kFloat64RoundTiesAway |
                       kFloat32RoundTiesEven | kFloat64RoundTiesEven |
-                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
+                      kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+                      kWord32ReverseBits | kWord64ReverseBits
   };
   typedef base::Flags<Flag, unsigned> Flags;
 
@@ -160,6 +164,8 @@
   const OptionalOperator Word32Ctz();
   const OptionalOperator Word32Popcnt();
   const OptionalOperator Word64Popcnt();
+  const OptionalOperator Word32ReverseBits();
+  const OptionalOperator Word64ReverseBits();
   bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
 
   const Operator* Word64And();
@@ -213,6 +219,8 @@
   const Operator* ChangeFloat32ToFloat64();
   const Operator* ChangeFloat64ToInt32();   // narrowing
   const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* TruncateFloat32ToInt32();
+  const Operator* TruncateFloat32ToUint32();
   const Operator* TryTruncateFloat32ToInt64();
   const Operator* TryTruncateFloat64ToInt64();
   const Operator* TryTruncateFloat32ToUint64();
@@ -227,8 +235,10 @@
   const Operator* TruncateFloat64ToFloat32();
   const Operator* TruncateFloat64ToInt32(TruncationMode);
   const Operator* TruncateInt64ToInt32();
+  const Operator* RoundInt32ToFloat32();
   const Operator* RoundInt64ToFloat32();
   const Operator* RoundInt64ToFloat64();
+  const Operator* RoundUint32ToFloat32();
   const Operator* RoundUint64ToFloat32();
   const Operator* RoundUint64ToFloat64();
 
@@ -303,9 +313,12 @@
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
 
+  const Operator* StackSlot(MachineRepresentation rep);
+
   // Access to the machine stack.
   const Operator* LoadStackPointer();
   const Operator* LoadFramePointer();
+  const Operator* LoadParentFramePointer();
 
   // checked-load heap, index, length
   const Operator* CheckedLoad(CheckedLoadRepresentation);
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 75e4b9e..cdd7e34 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -227,19 +227,25 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore ra if the frame was elided.
+      __ Push(ra);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Addu(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(ra);
+    }
   }
 
  private:
@@ -546,11 +552,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -604,6 +605,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
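+      // If this code built a frame, the caller's fp was saved at [fp + 0];
+      // with an elided frame, fp already holds the parent's frame pointer.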
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ lw(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -625,6 +633,13 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
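+      // Materialize the address of the allocated stack slot, relative to
+      // sp or fp depending on how the frame is currently accessed.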
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
     case kMipsAdd:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -688,6 +703,70 @@
     case kMipsClz:
       __ Clz(i.OutputRegister(), i.InputRegister(0));
       break;
+    case kMipsCtz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
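+      // This computes ctz(x) as 31 - clz(x & -x): x & -x isolates the
+      // lowest set bit, and clz then counts the bits above it.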
+      // Branch if the operand is zero.
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Subu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ clz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x1F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x20);
+      __ bind(&end);
+    } break;
+    case kMipsPopcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
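+      // SWAR population count (cf. Hacker's Delight): each step adds
+      // adjacent bit groups in parallel, with the masks below keeping
+      // the partial sums from colliding.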
+      uint32_t m1 = 0x55555555;
+      uint32_t m2 = 0x33333333;
+      uint32_t m4 = 0x0f0f0f0f;
+      uint32_t m8 = 0x00ff00ff;
+      uint32_t m16 = 0x0000ffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ srl(reg1, i.InputRegister(0), 1);
+      __ And(reg2, i.InputRegister(0), at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ srl(reg2, reg1, 2);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ srl(reg2, reg1, 4);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ srl(reg2, reg1, 8);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
+      __ li(at, m16);
+      __ srl(reg2, reg1, 16);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ addu(i.OutputRegister(), reg1, reg2);
+    } break;
     case kMipsShl:
       if (instr->InputAt(1)->IsRegister()) {
         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -950,6 +1029,12 @@
       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
       break;
     }
+    case kMipsCvtSUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      break;
+    }
     case kMipsCvtDUw: {
       FPURegister scratch = kScratchDoubleReg;
       __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
@@ -1010,6 +1095,12 @@
       __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
       break;
     }
+    case kMipsTruncUwS: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
+      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
     case kMipsFloat64ExtractLowWord32:
       __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -1416,19 +1507,10 @@
   MipsOperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   size_t const case_count = instr->InputCount() - 2;
-  Label here;
   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
-  __ BlockTrampolinePoolFor(case_count + 6);
-  __ bal(&here);
-  __ sll(at, input, 2);  // Branch delay slot.
-  __ bind(&here);
-  __ addu(at, at, ra);
-  __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
-  __ jr(at);
-  __ nop();  // Branch delay slot nop.
-  for (size_t index = 0; index < case_count; ++index) {
-    __ dd(GetLabel(i.InputRpo(index + 2)));
-  }
+  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+    return GetLabel(i.InputRpo(index + 2));
+  });
 }
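
Both MIPS ports now delegate jump-table emission to a macro-assembler
helper parameterized on a callable. A sketch of the shape assumed at this
call site (illustrative signature; the real helper lives in the MIPS
MacroAssembler):

// Emits a bounds-checked PC-relative jump table; entry i jumps to
// GetLabelFunction(i), for i in [0, case_count).
template <typename Func>
void GenerateSwitchTable(Register index, size_t case_count,
                         Func GetLabelFunction);

The lambda at the call site captures the operand converter by reference
and resolves each entry with GetLabel(i.InputRpo(index + 2)).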
 
 
@@ -1465,8 +1547,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index c938177..64aecd0 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -28,6 +28,8 @@
   V(MipsNor)                       \
   V(MipsXor)                       \
   V(MipsClz)                       \
+  V(MipsCtz)                       \
+  V(MipsPopcnt)                    \
   V(MipsShl)                       \
   V(MipsShr)                       \
   V(MipsSar)                       \
@@ -76,9 +78,11 @@
   V(MipsFloorWS)                   \
   V(MipsCeilWS)                    \
   V(MipsTruncUwD)                  \
+  V(MipsTruncUwS)                  \
   V(MipsCvtDW)                     \
   V(MipsCvtDUw)                    \
   V(MipsCvtSW)                     \
+  V(MipsCvtSUw)                    \
   V(MipsLb)                        \
   V(MipsLbu)                       \
   V(MipsSb)                        \
@@ -103,7 +107,6 @@
   V(MipsStoreToStackSlot)          \
   V(MipsStackClaim)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 61cea76..df972f7 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -151,7 +151,8 @@
     case MachineRepresentation::kWord32:
       opcode = kMipsLw;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -231,7 +232,8 @@
       case MachineRepresentation::kWord32:
         opcode = kMipsSw;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -401,10 +403,19 @@
 }
 
 
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
@@ -503,6 +514,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kMipsCvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kMipsCvtSUw, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kMipsCvtDW, node);
 }
@@ -513,6 +534,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kMipsTruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kMipsTruncUwS, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   MipsOperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -821,9 +852,11 @@
     // Poke any stack arguments.
     int slot = kCArgSlotCount;
     for (PushParameter input : (*arguments)) {
-      Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
-           g.TempImmediate(slot << kPointerSizeLog2));
-      ++slot;
+      if (input.node()) {
+        Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+             g.TempImmediate(slot << kPointerSizeLog2));
+        ++slot;
+      }
     }
   } else {
     // Possibly align stack here for functions.
@@ -869,9 +902,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1318,7 +1352,9 @@
              MachineOperatorBuilder::kFloat64RoundTruncate |
              MachineOperatorBuilder::kFloat64RoundTiesEven;
   }
-  return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+  return flags | MachineOperatorBuilder::kWord32Ctz |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kFloat64Min |
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 1b81aa5..373a1a6 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -227,19 +227,25 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore ra if the frame was elided.
+      __ Push(ra);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ Daddu(scratch1_, object_, index_);
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      __ Pop(ra);
+    }
   }
 
  private:
@@ -556,11 +562,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -614,6 +615,13 @@
     case kArchFramePointer:
       __ mov(i.OutputRegister(), fp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ ld(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mov(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
@@ -635,6 +643,13 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+               Operand(offset.offset()));
+      break;
+    }
     case kMips64Add:
       __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
@@ -735,6 +750,142 @@
     case kMips64Dclz:
       __ dclz(i.OutputRegister(), i.InputRegister(0));
       break;
+    case kMips64Ctz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
+      // Branch if the operand is zero.
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Subu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ clz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x1F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x20);
+      __ bind(&end);
+    } break;
+    case kMips64Dctz: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      Label skip_for_zero;
+      Label end;
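+      // 64-bit variant of the same identity: dctz(x) = 63 - dclz(x & -x)
+      // for x != 0; a zero input takes the skip_for_zero path and yields 64.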
+      // Branch if the operand is zero.
+      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+      // Find the number of bits before the last bit set to 1.
+      __ Dsubu(reg2, zero_reg, i.InputRegister(0));
+      __ And(reg2, reg2, i.InputRegister(0));
+      __ dclz(reg2, reg2);
+      // Get the number of bits after the last bit set to 1.
+      __ li(reg1, 0x3F);
+      __ Subu(i.OutputRegister(), reg1, reg2);
+      __ Branch(&end);
+      __ bind(&skip_for_zero);
+      // If the operand is zero, return word length as the result.
+      __ li(i.OutputRegister(), 0x40);
+      __ bind(&end);
+    } break;
+    case kMips64Popcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      uint32_t m1 = 0x55555555;
+      uint32_t m2 = 0x33333333;
+      uint32_t m4 = 0x0f0f0f0f;
+      uint32_t m8 = 0x00ff00ff;
+      uint32_t m16 = 0x0000ffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ dsrl(reg1, i.InputRegister(0), 1);
+      __ And(reg2, i.InputRegister(0), at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ dsrl(reg2, reg1, 2);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ dsrl(reg2, reg1, 4);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ dsrl(reg2, reg1, 8);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
+      __ li(at, m16);
+      __ dsrl(reg2, reg1, 16);
+      __ And(reg2, reg2, at);
+      __ And(reg1, reg1, at);
+      __ Daddu(i.OutputRegister(), reg1, reg2);
+    } break;
+    case kMips64Dpopcnt: {
+      Register reg1 = kScratchReg;
+      Register reg2 = kScratchReg2;
+      uint64_t m1 = 0x5555555555555555;
+      uint64_t m2 = 0x3333333333333333;
+      uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
+      uint64_t m8 = 0x00ff00ff00ff00ff;
+      uint64_t m16 = 0x0000ffff0000ffff;
+      uint64_t m32 = 0x00000000ffffffff;
+
+      // Put count of ones in every 2 bits into those 2 bits.
+      __ li(at, m1);
+      __ dsrl(reg1, i.InputRegister(0), 1);
+      __ and_(reg2, i.InputRegister(0), at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 4 bits into those 4 bits.
+      __ li(at, m2);
+      __ dsrl(reg2, reg1, 2);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 8 bits into those 8 bits.
+      __ li(at, m4);
+      __ dsrl(reg2, reg1, 4);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 16 bits into those 16 bits.
+      __ li(at, m8);
+      __ dsrl(reg2, reg1, 8);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Put count of ones in every 32 bits into those 32 bits.
+      __ li(at, m16);
+      __ dsrl(reg2, reg1, 16);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(reg1, reg1, reg2);
+
+      // Calculate total number of ones.
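+      // (dsrl32 shifts right by 32 + shamt, so this step adds the lower
+      // and upper 32-bit partial sums.)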
+      __ li(at, m32);
+      __ dsrl32(reg2, reg1, 0);
+      __ and_(reg2, reg2, at);
+      __ and_(reg1, reg1, at);
+      __ Daddu(i.OutputRegister(), reg1, reg2);
+    } break;
     case kMips64Shl:
       if (instr->InputAt(1)->IsRegister()) {
         __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1065,6 +1216,10 @@
       __ cvt_s_w(i.OutputDoubleRegister(), scratch);
       break;
     }
+    case kMips64CvtSUw: {
+      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    }
     case kMips64CvtSL: {
       FPURegister scratch = kScratchDoubleReg;
       __ dmtc1(i.InputRegister(0), scratch);
@@ -1200,6 +1355,12 @@
       __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
       break;
     }
+    case kMips64TruncUwS: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
+      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
     case kMips64TruncUlS: {
       FPURegister scratch = kScratchDoubleReg;
       Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
@@ -1648,27 +1809,15 @@
   AssembleArchJump(i.InputRpo(1));
 }
 
-
 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   MipsOperandConverter i(this, instr);
   Register input = i.InputRegister(0);
   size_t const case_count = instr->InputCount() - 2;
-  Label here;
 
   __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
-  __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
-  // Ensure that dd-ed labels use 8 byte aligned addresses.
-  __ Align(8);
-  __ bal(&here);
-  __ dsll(at, input, 3);  // Branch delay slot.
-  __ bind(&here);
-  __ daddu(at, at, ra);
-  __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
-  __ jr(at);
-  __ nop();  // Branch delay slot nop.
-  for (size_t index = 0; index < case_count; ++index) {
-    __ dd(GetLabel(i.InputRpo(index + 2)));
-  }
+  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+    return GetLabel(i.InputRpo(index + 2));
+  });
 }
 
 
@@ -1705,8 +1854,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 778c6ad..9e94c09 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -44,6 +44,10 @@
   V(Mips64Dext)                     \
   V(Mips64Dins)                     \
   V(Mips64Dclz)                     \
+  V(Mips64Ctz)                      \
+  V(Mips64Dctz)                     \
+  V(Mips64Popcnt)                   \
+  V(Mips64Dpopcnt)                  \
   V(Mips64Dshl)                     \
   V(Mips64Dshr)                     \
   V(Mips64Dsar)                     \
@@ -93,11 +97,13 @@
   V(Mips64TruncLS)                  \
   V(Mips64TruncLD)                  \
   V(Mips64TruncUwD)                 \
+  V(Mips64TruncUwS)                 \
   V(Mips64TruncUlS)                 \
   V(Mips64TruncUlD)                 \
   V(Mips64CvtDW)                    \
   V(Mips64CvtSL)                    \
   V(Mips64CvtSW)                    \
+  V(Mips64CvtSUw)                   \
   V(Mips64CvtSUl)                   \
   V(Mips64CvtDL)                    \
   V(Mips64CvtDUw)                   \
@@ -130,7 +136,6 @@
   V(Mips64StoreToStackSlot)         \
   V(Mips64StackClaim)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 1b12bd9..44a5470 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -159,6 +159,7 @@
     case MachineRepresentation::kWord64:
       opcode = kMips64Ld;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -241,6 +242,7 @@
       case MachineRepresentation::kWord64:
         opcode = kMips64Sd;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -562,16 +564,36 @@
 }
 
 
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
 
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
 
 
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Popcnt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
 
 
 void InstructionSelector::VisitWord64Ror(Node* node) {
@@ -802,6 +824,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kMips64CvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kMips64CvtSUw, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kMips64CvtDW, node);
 }
@@ -812,6 +844,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kMips64TruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kMips64TruncUwS, node);
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   Mips64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1307,6 +1349,7 @@
       break;
     case MachineRepresentation::kBit:
     case MachineRepresentation::kTagged:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1356,6 +1399,7 @@
       break;
     case MachineRepresentation::kBit:
     case MachineRepresentation::kTagged:
+    case MachineRepresentation::kSimd128:
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1846,7 +1890,11 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kWord32ShiftIsSafe |
+  return MachineOperatorBuilder::kWord32Ctz |
+         MachineOperatorBuilder::kWord64Ctz |
+         MachineOperatorBuilder::kWord32Popcnt |
+         MachineOperatorBuilder::kWord64Popcnt |
+         MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe |
          MachineOperatorBuilder::kFloat64Min |
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index bde3f7f..477f139 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -10,14 +10,17 @@
 
 namespace {
 
-typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
+struct MoveKey {
+  InstructionOperand source;
+  InstructionOperand destination;
+};
 
 struct MoveKeyCompare {
   bool operator()(const MoveKey& a, const MoveKey& b) const {
-    if (a.first.EqualsCanonicalized(b.first)) {
-      return a.second.CompareCanonicalized(b.second);
+    if (a.source.EqualsCanonicalized(b.source)) {
+      return a.destination.CompareCanonicalized(b.destination);
     }
-    return a.first.CompareCanonicalized(b.first);
+    return a.source.CompareCanonicalized(b.source);
   }
 };
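
Switching MoveKey from std::pair to named fields keeps the comparator's
lexicographic strict weak ordering intact. The same shape in miniature,
with int standing in for InstructionOperand:

struct KeySketch {
  int source;
  int destination;
};

struct KeySketchCompare {
  bool operator()(const KeySketch& a, const KeySketch& b) const {
    if (a.source == b.source) return a.destination < b.destination;
    return a.source < b.source;
  }
};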
 
@@ -32,39 +35,6 @@
 typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
 
 
-bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
-  if (instr->IsNop()) return true;
-  if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
-      instr->ClobbersDoubleRegisters()) {
-    return false;
-  }
-  if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
-
-  ZoneSet<InstructionOperand, OperandCompare> operands(zone);
-  for (size_t i = 0; i < instr->InputCount(); ++i) {
-    operands.insert(*instr->InputAt(i));
-  }
-  for (size_t i = 0; i < instr->OutputCount(); ++i) {
-    operands.insert(*instr->OutputAt(i));
-  }
-  for (size_t i = 0; i < instr->TempCount(); ++i) {
-    operands.insert(*instr->TempAt(i));
-  }
-  for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
-       i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
-    ParallelMove* moves = instr->parallel_moves()[i];
-    if (moves == nullptr) continue;
-    for (MoveOperands* move : *moves) {
-      if (operands.count(move->source()) > 0 ||
-          operands.count(move->destination()) > 0) {
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
-
 int FindFirstNonEmptySlot(const Instruction* instr) {
   int i = Instruction::FIRST_GAP_POSITION;
   for (; i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -85,11 +55,13 @@
 MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
     : local_zone_(local_zone),
       code_(code),
-      to_finalize_(local_zone),
       local_vector_(local_zone) {}
 
 
 void MoveOptimizer::Run() {
+  for (Instruction* instruction : code()->instructions()) {
+    CompressGaps(instruction);
+  }
   for (InstructionBlock* block : code()->instruction_blocks()) {
     CompressBlock(block);
   }
@@ -111,13 +83,140 @@
     }
     OptimizeMerge(block);
   }
-  for (Instruction* gap : to_finalize_) {
+  for (Instruction* gap : code()->instructions()) {
     FinalizeMoves(gap);
   }
 }
 
+void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
+  if (instruction->IsCall()) return;
+  ParallelMove* moves = instruction->parallel_moves()[0];
+  if (moves == nullptr) return;
 
-void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+  DCHECK(instruction->parallel_moves()[1] == nullptr ||
+         instruction->parallel_moves()[1]->empty());
+
+  OperandSet outputs(local_zone());
+  OperandSet inputs(local_zone());
+
+  // Outputs and temps are treated together as potentially clobbering a
+  // destination operand.
+  for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+    outputs.insert(*instruction->OutputAt(i));
+  }
+  for (size_t i = 0; i < instruction->TempCount(); ++i) {
+    outputs.insert(*instruction->TempAt(i));
+  }
+
+  // Input operands block elisions.
+  for (size_t i = 0; i < instruction->InputCount(); ++i) {
+    inputs.insert(*instruction->InputAt(i));
+  }
+
+  // Elide moves made redundant by the instruction.
+  for (MoveOperands* move : *moves) {
+    if (outputs.find(move->destination()) != outputs.end() &&
+        inputs.find(move->destination()) == inputs.end()) {
+      move->Eliminate();
+    }
+  }
+
+  // The ret instruction makes any assignment before it unnecessary, except for
+  // the one for its input.
+  if (instruction->opcode() == ArchOpcode::kArchRet) {
+    for (MoveOperands* move : *moves) {
+      if (inputs.find(move->destination()) == inputs.end()) {
+        move->Eliminate();
+      }
+    }
+  }
+}
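
A toy model of the elision rule implemented above (strings stand in for
operands; all names hypothetical): a gap move is dead when its destination
is written by the instruction's outputs or temps and not read by its
inputs.

#include <set>
#include <string>
#include <vector>

struct ToyMove {
  std::string src, dst;
  bool eliminated = false;
};

void RemoveClobbered(std::vector<ToyMove>* gap,
                     const std::set<std::string>& outputs_and_temps,
                     const std::set<std::string>& inputs) {
  for (ToyMove& move : *gap) {
    if (outputs_and_temps.count(move.dst) != 0 &&
        inputs.count(move.dst) == 0) {
      move.eliminated = true;  // The instruction overwrites dst anyway.
    }
  }
}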
+
+void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
+  if (from->IsCall()) return;
+
+  ParallelMove* from_moves = from->parallel_moves()[0];
+  if (from_moves == nullptr || from_moves->empty()) return;
+
+  ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
+  ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+
+  // If an operand is an input to the instruction, we cannot move assignments
+  // where it appears on the LHS.
+  for (size_t i = 0; i < from->InputCount(); ++i) {
+    dst_cant_be.insert(*from->InputAt(i));
+  }
+  // If an operand is an output of the instruction, we cannot move
+  // assignments where it appears on the RHS, because the value it held
+  // before the instruction would be lost.
+  // Same for temp operands.
+  // The output can't appear on the LHS because we performed
+  // RemoveClobberedDestinations for the "from" instruction.
+  for (size_t i = 0; i < from->OutputCount(); ++i) {
+    src_cant_be.insert(*from->OutputAt(i));
+  }
+  for (size_t i = 0; i < from->TempCount(); ++i) {
+    src_cant_be.insert(*from->TempAt(i));
+  }
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    // Assume dest has a value "V". If we have a "dest = y" move, then we can't
+    // move "z = dest", because z would become y rather than "V".
+    // We assume CompressMoves has happened before this, which means we don't
+    // have more than one assignment to dest.
+    src_cant_be.insert(move->destination());
+  }
+
+  ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
+  // Start with all the moves that have no conflicting source or
+  // destination operands; these are eligible for being moved down.
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+      MoveKey key = {move->source(), move->destination()};
+      move_candidates.insert(key);
+    }
+  }
+  if (move_candidates.empty()) return;
+
+  // Stabilize the candidate set.
+  bool changed = false;
+  do {
+    changed = false;
+    for (auto iter = move_candidates.begin(); iter != move_candidates.end();) {
+      auto current = iter;
+      ++iter;
+      InstructionOperand src = current->source;
+      if (src_cant_be.find(src) != src_cant_be.end()) {
+        src_cant_be.insert(current->destination);
+        move_candidates.erase(current);
+        changed = true;
+      }
+    }
+  } while (changed);
+
+  ParallelMove to_move(local_zone());
+  for (MoveOperands* move : *from_moves) {
+    if (move->IsRedundant()) continue;
+    MoveKey key = {move->source(), move->destination()};
+    if (move_candidates.find(key) != move_candidates.end()) {
+      to_move.AddMove(move->source(), move->destination(), code_zone());
+      move->Eliminate();
+    }
+  }
+  if (to_move.empty()) return;
+
+  ParallelMove* dest =
+      to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone());
+
+  CompressMoves(&to_move, dest);
+  DCHECK(dest->empty());
+  for (MoveOperands* m : to_move) {
+    dest->push_back(m);
+  }
+}
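
The "stabilize" loop above computes a fixed point: whenever a candidate's
source becomes forbidden, the candidate is dropped and its destination is
forbidden in turn, since that move now stays behind. The same closure in
miniature, with int standing in for InstructionOperand:

#include <set>
#include <utility>

using ToyOp = int;
using ToyMoveKey = std::pair<ToyOp, ToyOp>;  // (source, destination)

void Stabilize(std::set<ToyMoveKey>* candidates,
               std::set<ToyOp>* src_cant_be) {
  bool changed;
  do {
    changed = false;
    for (auto it = candidates->begin(); it != candidates->end();) {
      if (src_cant_be->count(it->first) != 0) {
        src_cant_be->insert(it->second);  // Its destination stays behind.
        it = candidates->erase(it);
        changed = true;
      } else {
        ++it;
      }
    }
  } while (changed);
}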
+
+void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
   if (right == nullptr) return;
 
   MoveOpVector& eliminated = local_vector();
@@ -147,54 +246,49 @@
   DCHECK(eliminated.empty());
 }
 
+void MoveOptimizer::CompressGaps(Instruction* instruction) {
+  int i = FindFirstNonEmptySlot(instruction);
+  bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+  USE(has_moves);
 
-// Smash all consecutive moves into the left most move slot and accumulate them
-// as much as possible across instructions.
-void MoveOptimizer::CompressBlock(InstructionBlock* block) {
-  Instruction* prev_instr = nullptr;
-  for (int index = block->code_start(); index < block->code_end(); ++index) {
-    Instruction* instr = code()->instructions()[index];
-    int i = FindFirstNonEmptySlot(instr);
-    bool has_moves = i <= Instruction::LAST_GAP_POSITION;
-
-    if (i == Instruction::LAST_GAP_POSITION) {
-      std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    } else if (i == Instruction::FIRST_GAP_POSITION) {
-      CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
-                    instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
-    }
-    // We either have no moves, or, after swapping or compressing, we have
-    // all the moves in the first gap position, and none in the second/end gap
-    // position.
-    ParallelMove* first =
-        instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
-    ParallelMove* last =
-        instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
-    USE(last);
-
-    DCHECK(!has_moves ||
-           (first != nullptr && (last == nullptr || last->empty())));
-
-    if (prev_instr != nullptr) {
-      if (has_moves) {
-        // Smash first into prev_instr, killing left.
-        ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
-        CompressMoves(pred_moves, first);
-      }
-      // Slide prev_instr down so we always know where to look for it.
-      std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
-    }
-
-    prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
-    if (GapsCanMoveOver(instr, local_zone())) continue;
-    if (prev_instr != nullptr) {
-      to_finalize_.push_back(prev_instr);
-      prev_instr = nullptr;
-    }
+  if (i == Instruction::LAST_GAP_POSITION) {
+    std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+              instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+  } else if (i == Instruction::FIRST_GAP_POSITION) {
+    CompressMoves(
+        instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+        instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
   }
-  if (prev_instr != nullptr) {
-    to_finalize_.push_back(prev_instr);
+  // We either have no moves, or, after swapping or compressing, we have
+  // all the moves in the first gap position, and none in the second/end gap
+  // position.
+  ParallelMove* first =
+      instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+  ParallelMove* last =
+      instruction->parallel_moves()[Instruction::LAST_GAP_POSITION];
+  USE(first);
+  USE(last);
+
+  DCHECK(!has_moves ||
+         (first != nullptr && (last == nullptr || last->empty())));
+}
+
+void MoveOptimizer::CompressBlock(InstructionBlock* block) {
+  int first_instr_index = block->first_instruction_index();
+  int last_instr_index = block->last_instruction_index();
+
+  // Start by removing gap assignments whose destination is clobbered by
+  // the output of the instruction that follows the gap, as long as it is
+  // not read by that instruction's inputs.
+  Instruction* prev_instr = code()->instructions()[first_instr_index];
+  RemoveClobberedDestinations(prev_instr);
+
+  for (int index = first_instr_index + 1; index <= last_instr_index; ++index) {
+    Instruction* instr = code()->instructions()[index];
+    // Migrate eligible moves from prev_instr down into the gap of instr.
+    MigrateMoves(instr, prev_instr);
+    // Remove gap assignments clobbered by instr's output.
+    RemoveClobberedDestinations(instr);
+    prev_instr = instr;
   }
 }
 
@@ -211,6 +305,12 @@
   // things that would prevent moving gap moves across them.
   for (RpoNumber& pred_index : block->predecessors()) {
     const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+
+    // If the predecessor has more than one successor, we shouldn't attempt to
+    // move down to this block (one of the successors) any of the gap moves,
+    // because their effect may be necessary to the other successors.
+    if (pred->SuccessorCount() > 1) return;
+
     const Instruction* last_instr =
         code()->instructions()[pred->last_instruction_index()];
     if (last_instr->IsCall()) return;
@@ -246,21 +346,54 @@
       }
     }
   }
-  if (move_map.empty() || correct_counts != move_map.size()) return;
+  if (move_map.empty() || correct_counts == 0) return;
+
   // Find insertion point.
-  Instruction* instr = nullptr;
-  for (int i = block->first_instruction_index();
-       i <= block->last_instruction_index(); ++i) {
-    instr = code()->instructions()[i];
-    if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
-      break;
+  Instruction* instr = code()->instructions()[block->first_instruction_index()];
+
+  if (correct_counts != move_map.size()) {
+    // Moves that are unique to each predecessor won't be pushed to the common
+    // successor.
+    OperandSet conflicting_srcs(local_zone());
+    for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+      auto current = iter;
+      ++iter;
+      if (current->second != block->PredecessorCount()) {
+        InstructionOperand dest = current->first.destination;
+        // Not all gaps contain the same moves, though some moves may be
+        // common to every predecessor. Those we could still push down, but
+        // the destination of a move staying behind can't appear as the
+        // source of a common move, because the move staying behind will
+        // clobber that destination.
+        conflicting_srcs.insert(dest);
+        move_map.erase(current);
+      }
+    }
+
+    bool changed = false;
+    do {
+      // If a common move can't be pushed to the common successor, then its
+      // destination also can't appear as source to any move being pushed.
+      changed = false;
+      for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+        auto current = iter;
+        ++iter;
+        DCHECK_EQ(block->PredecessorCount(), current->second);
+        if (conflicting_srcs.find(current->first.source) !=
+            conflicting_srcs.end()) {
+          conflicting_srcs.insert(current->first.destination);
+          move_map.erase(current);
+          changed = true;
+        }
+      }
+    } while (changed);
   }
+
+  if (move_map.empty()) return;
+
   DCHECK_NOT_NULL(instr);
   bool gap_initialized = true;
-  if (instr->parallel_moves()[0] == nullptr ||
-      instr->parallel_moves()[0]->empty()) {
-    to_finalize_.push_back(instr);
-  } else {
+  if (instr->parallel_moves()[0] != nullptr &&
+      !instr->parallel_moves()[0]->empty()) {
     // Will compress after insertion.
     gap_initialized = false;
     std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
@@ -275,12 +408,12 @@
       if (move->IsRedundant()) continue;
       MoveKey key = {move->source(), move->destination()};
       auto it = move_map.find(key);
-      USE(it);
-      DCHECK(it != move_map.end());
-      if (first_iteration) {
-        moves->AddMove(move->source(), move->destination());
+      if (it != move_map.end()) {
+        if (first_iteration) {
+          moves->AddMove(move->source(), move->destination());
+        }
+        move->Eliminate();
       }
-      move->Eliminate();
     }
     first_iteration = false;
   }
@@ -288,6 +421,7 @@
   if (!gap_initialized) {
     CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
   }
+  CompressBlock(block);
 }
 
 
@@ -316,8 +450,10 @@
   MoveOpVector& loads = local_vector();
   DCHECK(loads.empty());
 
+  ParallelMove* parallel_moves = instr->parallel_moves()[0];
+  if (parallel_moves == nullptr) return;
   // Find all the loads.
-  for (MoveOperands* move : *instr->parallel_moves()[0]) {
+  for (MoveOperands* move : *parallel_moves) {
     if (move->IsRedundant()) continue;
     if (move->source().IsConstant() || IsSlot(move->source())) {
       loads.push_back(move);
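
A note on the merge-pruning logic added above in OptimizeMerge: dropping the per-predecessor-unique moves and then iterating until no remaining common move reads a clobbered source is a small fixed-point computation. The following is a minimal standalone sketch of just that pruning step, with MoveKey reduced to a plain (source, destination) pair of ints and the per-predecessor counts assumed precomputed; the names are illustrative, not V8's.

    #include <map>
    #include <set>
    #include <utility>

    using Operand = int;
    using MoveKey = std::pair<Operand, Operand>;  // (source, destination)

    // Keep only the moves common to all predecessors whose sources are not
    // clobbered by any move that stays behind.
    void PruneCommonMoves(std::map<MoveKey, size_t>* move_map,
                          size_t predecessor_count) {
      std::set<Operand> conflicting_srcs;
      // First pass: drop moves not present in every predecessor. Their
      // destinations will be clobbered by the copies staying behind.
      for (auto it = move_map->begin(); it != move_map->end();) {
        if (it->second != predecessor_count) {
          conflicting_srcs.insert(it->first.second);  // the destination
          it = move_map->erase(it);
        } else {
          ++it;
        }
      }
      // Fixed point: a dropped move's destination may feed a remaining common
      // move, which must then be dropped as well.
      bool changed;
      do {
        changed = false;
        for (auto it = move_map->begin(); it != move_map->end();) {
          if (conflicting_srcs.count(it->first.first) != 0) {  // source clobbered
            conflicting_srcs.insert(it->first.second);
            it = move_map->erase(it);
            changed = true;
          } else {
            ++it;
          }
        }
      } while (changed);
    }
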
diff --git a/src/compiler/move-optimizer.h b/src/compiler/move-optimizer.h
index c9a3289..8e932a0 100644
--- a/src/compiler/move-optimizer.h
+++ b/src/compiler/move-optimizer.h
@@ -26,15 +26,30 @@
   Zone* code_zone() const { return code()->zone(); }
   MoveOpVector& local_vector() { return local_vector_; }
 
-  void CompressBlock(InstructionBlock* blocke);
-  void CompressMoves(ParallelMove* left, ParallelMove* right);
+  // Consolidate an instruction's gap moves into its first gap.
+  void CompressGaps(Instruction* instr);
+
+  // Attempt to push down to the block's last instruction those moves that
+  // can be moved safely.
+  void CompressBlock(InstructionBlock* block);
+
+  // Consolidate moves into the first gap.
+  void CompressMoves(ParallelMove* left, MoveOpVector* right);
+
+  // Push down those moves in the gap of {from} that change neither the
+  // semantics of the {from} instruction nor the semantics of the moves
+  // that remain behind.
+  void MigrateMoves(Instruction* to, Instruction* from);
+
+  void RemoveClobberedDestinations(Instruction* instruction);
+
   const Instruction* LastInstruction(const InstructionBlock* block) const;
+
+  // Consolidate common moves appearing across all predecessors of a block.
   void OptimizeMerge(InstructionBlock* block);
   void FinalizeMoves(Instruction* instr);
 
   Zone* const local_zone_;
   InstructionSequence* const code_;
-  Instructions to_finalize_;
   MoveOpVector local_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index cb6c3c4..ac9cc34 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -4,11 +4,12 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/verifier.h"
-#include "src/types-inl.h"
+#include "src/handles-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -123,6 +124,7 @@
 
 // static
 bool NodeProperties::IsExceptionalCall(Node* node) {
+  if (node->op()->HasProperty(Operator::kNoThrow)) return false;
   for (Edge const edge : node->use_edges()) {
     if (!NodeProperties::IsControlEdge(edge)) continue;
     if (edge.from()->opcode() == IrOpcode::kIfException) return true;
@@ -334,6 +336,16 @@
     Node* node, MaybeHandle<Context> native_context) {
   while (true) {
     switch (node->opcode()) {
+      case IrOpcode::kJSLoadContext: {
+        ContextAccess const& access = ContextAccessOf(node->op());
+        if (access.index() != Context::NATIVE_CONTEXT_INDEX) {
+          return MaybeHandle<Context>();
+        }
+        // Skip over the intermediate contexts; we're only interested in the
+        // very last context in the context chain anyway.
+        node = NodeProperties::GetContextInput(node);
+        break;
+      }
       case IrOpcode::kJSCreateBlockContext:
       case IrOpcode::kJSCreateCatchContext:
       case IrOpcode::kJSCreateFunctionContext:
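
The new kJSLoadContext case above only looks through loads of Context::NATIVE_CONTEXT_INDEX and then keeps following the context input, i.e. it chases the context chain upwards until a node that pins down the native context is found. A simplified model of that walk, with the node graph replaced by a plain linked structure (purely illustrative, not V8's types):

    struct Ctx {
      const Ctx* previous;  // the enclosing context, nullptr at the top
      const Ctx* native;    // every context carries its native context slot
    };

    // All contexts on one chain share the same native context, so reading the
    // slot after walking to the top is equivalent to reading it at any link.
    const Ctx* FindNativeContext(const Ctx* ctx) {
      while (ctx->previous != nullptr) ctx = ctx->previous;
      return ctx->native;
    }
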
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index a97fdfa..c78e15e 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -128,7 +128,6 @@
 #define JS_CONTEXT_OP_LIST(V) \
   V(JSLoadContext)            \
   V(JSStoreContext)           \
-  V(JSLoadDynamic)            \
   V(JSCreateFunctionContext)  \
   V(JSCreateCatchContext)     \
   V(JSCreateWithContext)      \
@@ -202,6 +201,7 @@
   V(StoreBuffer)                   \
   V(StoreElement)                  \
   V(ObjectIsNumber)                \
+  V(ObjectIsReceiver)              \
   V(ObjectIsSmi)
 
 // Opcodes for Machine-level operators.
@@ -227,6 +227,7 @@
   MACHINE_COMPARE_BINOP_LIST(V) \
   V(Load)                       \
   V(Store)                      \
+  V(StackSlot)                  \
   V(Word32And)                  \
   V(Word32Or)                   \
   V(Word32Xor)                  \
@@ -236,6 +237,7 @@
   V(Word32Ror)                  \
   V(Word32Clz)                  \
   V(Word32Ctz)                  \
+  V(Word32ReverseBits)          \
   V(Word32Popcnt)               \
   V(Word64Popcnt)               \
   V(Word64And)                  \
@@ -247,6 +249,7 @@
   V(Word64Ror)                  \
   V(Word64Clz)                  \
   V(Word64Ctz)                  \
+  V(Word64ReverseBits)          \
   V(Int32Add)                   \
   V(Int32AddWithOverflow)       \
   V(Int32Sub)                   \
@@ -270,6 +273,8 @@
   V(ChangeFloat32ToFloat64)     \
   V(ChangeFloat64ToInt32)       \
   V(ChangeFloat64ToUint32)      \
+  V(TruncateFloat32ToInt32)     \
+  V(TruncateFloat32ToUint32)    \
   V(TryTruncateFloat32ToInt64)  \
   V(TryTruncateFloat64ToInt64)  \
   V(TryTruncateFloat32ToUint64) \
@@ -281,8 +286,10 @@
   V(TruncateFloat64ToFloat32)   \
   V(TruncateFloat64ToInt32)     \
   V(TruncateInt64ToInt32)       \
+  V(RoundInt32ToFloat32)        \
   V(RoundInt64ToFloat32)        \
   V(RoundInt64ToFloat64)        \
+  V(RoundUint32ToFloat32)       \
   V(RoundUint64ToFloat32)       \
   V(RoundUint64ToFloat64)       \
   V(BitcastFloat32ToInt32)      \
@@ -321,6 +328,7 @@
   V(Float64InsertHighWord32)    \
   V(LoadStackPointer)           \
   V(LoadFramePointer)           \
+  V(LoadParentFramePointer)     \
   V(CheckedLoad)                \
   V(CheckedStore)
 
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index bd704a3..1ee31d5 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -55,7 +55,6 @@
     case IrOpcode::kJSCreateLiteralRegExp:
 
     // Context operations
-    case IrOpcode::kJSLoadDynamic:
     case IrOpcode::kJSCreateScriptContext:
 
     // Conversions
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 4d6aacd..21c34fc 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -30,8 +30,8 @@
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-call-reducer.h"
-#include "src/compiler/js-context-relaxation.h"
 #include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-create-lowering.h"
 #include "src/compiler/js-frame-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-global-object-specialization.h"
@@ -276,11 +276,8 @@
         info()->isolate(), instruction_zone(), instruction_blocks);
   }
 
-  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
-                                        CallDescriptor* descriptor,
-                                        const char* debug_name) {
+  void InitializeFrameData(CallDescriptor* descriptor) {
     DCHECK(frame_ == nullptr);
-    DCHECK(register_allocation_data_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
       fixed_frame_size = (descriptor->IsCFunctionCall())
@@ -289,6 +286,12 @@
                              : StandardFrameConstants::kFixedSlotCount;
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+  }
+
+  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+                                        CallDescriptor* descriptor,
+                                        const char* debug_name) {
+    DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
                                sequence(), debug_name);
@@ -512,7 +515,7 @@
     if (data->info()->shared_info()->HasBytecodeArray()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
                                          data->jsgraph());
-      succeeded = graph_builder.CreateGraph(stack_check);
+      succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
@@ -536,7 +539,7 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(data->jsgraph(),
+    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
                                data->info()->is_deoptimization_enabled()
                                    ? JSCallReducer::kDeoptimizationEnabled
                                    : JSCallReducer::kNoFlags,
@@ -549,17 +552,19 @@
     JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
                                                data->jsgraph());
     JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
-            : JSGlobalObjectSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies());
+        &graph_reducer, data->jsgraph(), data->native_context(),
+        data->info()->dependencies());
+    JSNativeContextSpecialization::Flags flags =
+        JSNativeContextSpecialization::kNoFlags;
+    if (data->info()->is_bailout_on_uninitialized()) {
+      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
+    }
+    if (data->info()->is_deoptimization_enabled()) {
+      flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
+    }
     JSNativeContextSpecialization native_context_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSNativeContextSpecialization::kDeoptimizationEnabled
-            : JSNativeContextSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies(), temp_zone);
+        &graph_reducer, data->jsgraph(), flags, data->native_context(),
+        data->info()->dependencies(), temp_zone);
     JSInliningHeuristic inlining(&graph_reducer,
                                  data->info()->is_inlining_enabled()
                                      ? JSInliningHeuristic::kGeneralInlining
@@ -570,7 +575,9 @@
     if (data->info()->is_frame_specializing()) {
       AddReducer(data, &graph_reducer, &frame_specialization);
     }
-    AddReducer(data, &graph_reducer, &global_object_specialization);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &global_object_specialization);
+    }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &call_reducer);
@@ -610,6 +617,13 @@
                                               data->common());
     LoadElimination load_elimination(&graph_reducer);
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+    MaybeHandle<LiteralsArray> literals_array =
+        data->info()->is_native_context_specializing()
+            ? handle(data->info()->closure()->literals(), data->isolate())
+            : MaybeHandle<LiteralsArray>();
+    JSCreateLowering create_lowering(
+        &graph_reducer, data->info()->dependencies(), data->jsgraph(),
+        literals_array, temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -629,6 +643,9 @@
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &builtin_reducer);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &create_lowering);
+    }
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
@@ -664,8 +681,11 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
+    escape_reducer.SetExistsVirtualAllocate(
+        escape_analysis.ExistsVirtualAllocate());
     AddReducer(data, &graph_reducer, &escape_reducer);
     graph_reducer.ReduceGraph();
+    escape_reducer.VerifyReplacement();
   }
 };
 
@@ -677,6 +697,13 @@
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
                                 data->source_positions());
     lowering.LowerAllNodes();
+
+    // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+    if (lowering.abort_compilation_) {
+      data->set_compilation_failed();
+      return;
+    }
+
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
@@ -772,7 +799,6 @@
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    JSContextRelaxation context_relaxing;
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -782,7 +808,6 @@
     SelectLowering select_lowering(data->jsgraph()->graph(),
                                    data->jsgraph()->common());
     TailCallOptimization tco(data->common(), data->graph());
-    AddReducer(data, &graph_reducer, &context_relaxing);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     AddReducer(data, &graph_reducer, &generic_lowering);
@@ -813,7 +838,7 @@
   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
     InstructionSelector selector(
         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
-        data->schedule(), data->source_positions(),
+        data->schedule(), data->source_positions(), data->frame(),
         data->info()->is_source_positions_enabled()
             ? InstructionSelector::kAllSourcePositions
             : InstructionSelector::kCallSourcePositions);
@@ -979,9 +1004,10 @@
 struct JumpThreadingPhase {
   static const char* phase_name() { return "jump threading"; }
 
-  void Run(PipelineData* data, Zone* temp_zone) {
+  void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
     ZoneVector<RpoNumber> result(temp_zone);
-    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+                                         frame_at_start)) {
       JumpThreading::ApplyForwarding(result, data->sequence());
     }
   }
@@ -1053,13 +1079,6 @@
 
 
 Handle<Code> Pipeline::GenerateCode() {
-  // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
-  // the correct solution is to restore the context register after invoking
-  // builtins from full-codegen.
-  if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
-    return Handle<Code>::null();
-  }
-
   ZonePool zone_pool;
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
 
@@ -1073,13 +1092,14 @@
     if (json_file != nullptr) {
       OFStream json_of(json_file);
       Handle<Script> script = info()->script();
-      FunctionLiteral* function = info()->literal();
       base::SmartArrayPointer<char> function_name = info()->GetDebugName();
       int pos = info()->shared_info()->start_position();
       json_of << "{\"function\":\"" << function_name.get()
               << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
-      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      if (info()->has_literal() && !script->IsUndefined() &&
+          !script->source()->IsUndefined()) {
         DisallowHeapAllocation no_allocation;
+        FunctionLiteral* function = info()->literal();
         int start = function->start_position();
         int len = function->end_position() - start;
         String::SubStringRange source(String::cast(script->source()), start,
@@ -1204,6 +1224,9 @@
   // Kill the Typer and thereby uninstall the decorator (if any).
   typer.Reset(nullptr);
 
+  // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+  if (data.compilation_failed()) return Handle<Code>::null();
+
   return ScheduleAndGenerateCode(
       Linkage::ComputeIncoming(data.instruction_zone(), info()));
 }
@@ -1212,10 +1235,9 @@
 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
                                                CallDescriptor* call_descriptor,
                                                Graph* graph, Schedule* schedule,
-                                               Code::Kind kind,
+                                               Code::Flags flags,
                                                const char* debug_name) {
-  CompilationInfo info(debug_name, isolate, graph->zone());
-  info.set_output_code_kind(kind);
+  CompilationInfo info(debug_name, isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool;
@@ -1286,6 +1308,7 @@
   PipelineData data(&zone_pool, &info, sequence);
   Pipeline pipeline(&info);
   pipeline.data_ = &data;
+  pipeline.data_->InitializeFrameData(nullptr);
   pipeline.AllocateRegisters(config, nullptr, run_verifier);
   return !data.compilation_failed();
 }
@@ -1308,6 +1331,7 @@
 
   data->InitializeInstructionSequence();
 
+  data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
   Linkage linkage(call_descriptor);
   Run<InstructionSelectionPhase>(&linkage);
@@ -1329,6 +1353,7 @@
   BeginPhaseKind("register allocation");
 
   bool run_verifier = FLAG_turbo_verify_allocation;
+
   // Allocate registers.
   AllocateRegisters(
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
@@ -1339,10 +1364,16 @@
   }
 
   BeginPhaseKind("code generation");
-
+  // TODO(mtrofin): move this off to the register allocator.
+  bool generate_frame_at_start =
+      !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
+      !data_->frame()->needs_frame() ||
+      data_->sequence()->instruction_blocks().front()->needs_frame() ||
+      linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
+      linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
   // Optimize jumps.
   if (FLAG_turbo_jt) {
-    Run<JumpThreadingPhase>();
+    Run<JumpThreadingPhase>(generate_frame_at_start);
   }
 
   // Generate final machine code.
@@ -1446,7 +1477,8 @@
     Run<MergeSplintersPhase>();
   }
 
-  if (FLAG_turbo_frame_elision) {
+  // We plan to enable frame elision only for stubs and bytecode handlers.
+  if (FLAG_turbo_frame_elision && info()->IsStub()) {
     Run<LocateSpillSlotsPhase>();
     Run<FrameElisionPhase>();
   }
@@ -1482,6 +1514,8 @@
   data->DeleteRegisterAllocationZone();
 }
 
+Isolate* Pipeline::isolate() const { return info()->isolate(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index af94018..edb8191 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -7,11 +7,12 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 // Do not include anything from src/compiler here!
-#include "src/compiler.h"
+#include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
+class CompilationInfo;
 class RegisterConfiguration;
 
 namespace compiler {
@@ -35,7 +36,7 @@
   static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
                                               CallDescriptor* call_descriptor,
                                               Graph* graph, Schedule* schedule,
-                                              Code::Kind kind,
+                                              Code::Flags flags,
                                               const char* debug_name);
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
@@ -57,23 +58,27 @@
                                              Schedule* schedule = nullptr);
 
  private:
-  CompilationInfo* info_;
-  PipelineData* data_;
-
   // Helpers for executing pipeline phases.
   template <typename Phase>
   void Run();
   template <typename Phase, typename Arg0>
   void Run(Arg0 arg_0);
-
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() { return info_->isolate(); }
+  template <typename Phase, typename Arg0, typename Arg1>
+  void Run(Arg0 arg_0, Arg1 arg_1);
 
   void BeginPhaseKind(const char* phase_kind);
   void RunPrintAndVerify(const char* phase, bool untyped = false);
   Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
   void AllocateRegisters(const RegisterConfiguration* config,
                          CallDescriptor* descriptor, bool run_verifier);
+
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const;
+
+  CompilationInfo* const info_;
+  PipelineData* data_;
+
+  DISALLOW_COPY_AND_ASSIGN(Pipeline);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 154cd64..7fc6dd9 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -167,6 +167,19 @@
       : OutOfLineCode(gen),
         object_(object),
         offset_(offset),
+        offset_immediate_(0),
+        value_(value),
+        scratch0_(scratch0),
+        scratch1_(scratch1),
+        mode_(mode) {}
+
+  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+                       Register value, Register scratch0, Register scratch1,
+                       RecordWriteMode mode)
+      : OutOfLineCode(gen),
+        object_(object),
+        offset_(no_reg),
+        offset_immediate_(offset),
         value_(value),
         scratch0_(scratch0),
         scratch1_(scratch1),
@@ -176,24 +189,39 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
-    // TODO(turbofan): Once we get frame elision working, we need to save
-    // and restore lr properly here if the frame was elided.
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ mflr(scratch1_);
+      __ Push(scratch1_);
+    }
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
-    __ add(scratch1_, object_, offset_);
+                         remembered_set_action, save_fp_mode);
+    if (offset_.is(no_reg)) {
+      __ addi(scratch1_, object_, Operand(offset_immediate_));
+    } else {
+      DCHECK_EQ(0, offset_immediate_);
+      __ add(scratch1_, object_, offset_);
+    }
     __ CallStub(&stub);
+    if (!frame()->needs_frame()) {
+      // We need to save and restore lr if the frame was elided.
+      __ Pop(scratch1_);
+      __ mtlr(scratch1_);
+    }
   }
 
  private:
   Register const object_;
   Register const offset_;
+  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
   Register const value_;
   Register const scratch0_;
   Register const scratch1_;
@@ -651,13 +679,7 @@
     frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
   }
   if (frame()->needs_frame()) {
-    if (FLAG_enable_embedded_constant_pool) {
-      __ LoadP(kConstantPoolRegister,
-               MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
-    }
-    __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-    __ mtlr(r0);
+    __ RestoreFrameStateForTailCall();
   }
   frame_access_state()->SetFrameAccessToSP();
 }
@@ -740,13 +762,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          masm());
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       int const num_parameters = MiscField::decode(instr->opcode());
       __ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -807,6 +822,13 @@
       __ mr(i.OutputRegister(), fp);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+      } else {
+        __ mr(i.OutputRegister(), fp);
+      }
+      break;
     case kArchTruncateDoubleToI:
       // TODO(mbrandy): move slow call to stub out of line.
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -816,19 +838,38 @@
       RecordWriteMode mode =
           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
       Register object = i.InputRegister(0);
-      Register offset = i.InputRegister(1);
       Register value = i.InputRegister(2);
       Register scratch0 = i.TempRegister(0);
       Register scratch1 = i.TempRegister(1);
-      auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
-                                                   scratch0, scratch1, mode);
-      __ StorePX(value, MemOperand(object, offset));
+      OutOfLineRecordWrite* ool;
+
+      AddressingMode addressing_mode =
+          AddressingModeField::decode(instr->opcode());
+      if (addressing_mode == kMode_MRI) {
+        int32_t offset = i.InputInt32(1);
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StoreP(value, MemOperand(object, offset));
+      } else {
+        DCHECK_EQ(kMode_MRR, addressing_mode);
+        Register offset(i.InputRegister(1));
+        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+                                                scratch0, scratch1, mode);
+        __ StorePX(value, MemOperand(object, offset));
+      }
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                        ool->entry());
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+              Operand(offset.offset()));
+      break;
+    }
     case kPPC_And:
       if (HasRegisterInput(instr, 1)) {
         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1194,10 +1235,19 @@
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
+    case kPPC_Int32ToFloat32:
+      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
     case kPPC_Int32ToDouble:
       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
+    case kPPC_Uint32ToFloat32:
+      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+                                   i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveRC, i.OutputRCBit());
+      break;
     case kPPC_Uint32ToDouble:
       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                     i.OutputDoubleRegister());
@@ -1581,8 +1631,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
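
The kArchParentFramePointer case above encodes a simple rule: if this frame was elided, fp still belongs to the parent, so it can be used directly; otherwise the parent's fp is the saved word at the base of the current frame. As a hedged sketch (hypothetical helper, not V8 code):

    #include <cstdint>

    // Returns the parent frame pointer given whether the current frame exists.
    uintptr_t ParentFramePointer(bool needs_frame, uintptr_t fp) {
      // With a frame: the caller's fp was pushed at the base of our frame.
      // Without a frame: fp was never rewritten, so it is already the parent's.
      return needs_frame ? *reinterpret_cast<const uintptr_t*>(fp) : fp;
    }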
 
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index a3bf80e..877ebb5 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -82,7 +82,9 @@
   V(PPC_Int64ToDouble)             \
   V(PPC_Uint64ToFloat32)           \
   V(PPC_Uint64ToDouble)            \
+  V(PPC_Int32ToFloat32)            \
   V(PPC_Int32ToDouble)             \
+  V(PPC_Uint32ToFloat32)           \
   V(PPC_Uint32ToDouble)            \
   V(PPC_Float32ToDouble)           \
   V(PPC_DoubleToInt32)             \
@@ -114,7 +116,6 @@
   V(PPC_StoreFloat32)              \
   V(PPC_StoreDouble)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index fc90cdd..fd1df6a 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -81,7 +81,9 @@
     case kPPC_Int64ToDouble:
     case kPPC_Uint64ToFloat32:
     case kPPC_Uint64ToDouble:
+    case kPPC_Int32ToFloat32:
     case kPPC_Int32ToDouble:
+    case kPPC_Uint32ToFloat32:
     case kPPC_Uint32ToDouble:
     case kPPC_Float32ToDouble:
     case kPPC_DoubleToInt32:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index f6ebbdf..244e6f4 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -200,6 +200,7 @@
 #else
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -227,13 +228,25 @@
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
   MachineRepresentation rep = store_rep.representation();
 
-  // TODO(ppc): I guess this could be done in a better way.
   if (write_barrier_kind != kNoWriteBarrier) {
     DCHECK_EQ(MachineRepresentation::kTagged, rep);
+    AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
     inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(offset);
+    // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
+    // for the store itself, so we must check compatibility with both.
+    if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_PPC64
+        && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+            ) {
+      inputs[input_count++] = g.UseImmediate(offset);
+      addressing_mode = kMode_MRI;
+    } else {
+      inputs[input_count++] = g.UseUniqueRegister(offset);
+      addressing_mode = kMode_MRR;
+    }
     inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
                                 ? g.UseRegister(value)
                                 : g.UseUniqueRegister(value);
@@ -255,6 +268,7 @@
     InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
     size_t const temp_count = arraysize(temps);
     InstructionCode code = kArchStoreWithWriteBarrier;
+    code |= AddressingModeField::encode(addressing_mode);
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
@@ -289,6 +303,7 @@
 #else
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -340,6 +355,7 @@
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -385,6 +401,7 @@
 #if !V8_TARGET_ARCH_PPC64
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -825,6 +842,14 @@
 #endif
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+
 void InstructionSelector::VisitInt32Add(Node* node) {
   VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
 }
@@ -940,6 +965,16 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  VisitRR(this, kPPC_Uint32ToFloat32, node);
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   VisitRR(this, kPPC_Int32ToDouble, node);
 }
@@ -1010,6 +1045,16 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  VisitRR(this, kPPC_DoubleToInt32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  VisitRR(this, kPPC_DoubleToUint32, node);
+}
+
+
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   // TODO(mbrandy): inspect input to see if nop is appropriate.
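
The immediate path added to VisitStore above is gated on the offset fitting both consumers: the addi emitted by OutOfLineRecordWrite takes a signed 16-bit immediate, and on 64-bit targets the DS-form StoreP additionally requires a 4-byte-aligned displacement. A small sketch of that predicate; the constants stand in for kInt16Imm and kInt16Imm_4ByteAligned, and this is not V8's actual matcher:

    #include <cstdint>

    bool OffsetFitsImmediateStore(int64_t offset, bool is_64bit) {
      // addi: signed 16-bit immediate.
      bool fits_int16 = offset >= INT16_MIN && offset <= INT16_MAX;
      // DS-form stores encode the displacement with its low two bits as zero.
      bool four_byte_aligned = (offset & 3) == 0;
      return fits_int16 && (!is_64bit || four_byte_aligned);
    }
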
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 4df2bde..0d4b8cb 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -63,7 +63,7 @@
 void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
                                  RawMachineLabel* false_val) {
   DCHECK(current_block_ != schedule()->end());
-  Node* branch = AddNode(common()->Branch(), condition);
+  Node* branch = MakeNode(common()->Branch(), 1, &condition);
   schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
   current_block_ = nullptr;
 }
@@ -152,6 +152,19 @@
   return AddNode(common()->Call(desc), input_count, buffer);
 }
 
+Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
+                                        Node* context) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(descriptor->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(0);
+
+  return AddNode(common()->Call(descriptor), centry, ref, arity, context);
+}
 
 Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
                                         Node* arg1, Node* context) {
@@ -183,6 +196,21 @@
                  context);
 }
 
+Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
+                                        Node* arg1, Node* arg2, Node* arg3,
+                                        Node* context) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(descriptor->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(3);
+
+  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
+                 arity, context);
+}
 
 Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
                                         Node* arg1, Node* arg2, Node* arg3,
@@ -266,6 +294,51 @@
   return tail_call;
 }
 
+Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* context) {
+  const int kArity = 3;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
+
+Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
+                                            Node* arg1, Node* arg2, Node* arg3,
+                                            Node* arg4, Node* context) {
+  const int kArity = 4;
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, kArity, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+  Node* ref = AddNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(kArity);
+
+  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
+  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+  NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+  schedule()->AddTailCall(CurrentBlock(), tail_call);
+  current_block_ = nullptr;
+  return tail_call;
+}
 
 Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
                                           Node* function) {
@@ -354,9 +427,24 @@
   return current_block_;
 }
 
+Node* RawMachineAssembler::Phi(MachineRepresentation rep, int input_count,
+                               Node* const* inputs) {
+  Node** buffer = new (zone()->New(sizeof(Node*) * (input_count + 1)))
+      Node*[input_count + 1];
+  std::copy(inputs, inputs + input_count, buffer);
+  buffer[input_count] = graph()->start();
+  return AddNode(common()->Phi(rep, input_count), input_count + 1, buffer);
+}
+
+void RawMachineAssembler::AppendPhiInput(Node* phi, Node* new_input) {
+  const Operator* op = phi->op();
+  const Operator* new_op = common()->ResizeMergeOrPhi(op, phi->InputCount());
+  phi->InsertInput(zone(), phi->InputCount() - 1, new_input);
+  NodeProperties::ChangeOp(phi, new_op);
+}
 
 Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
-                                   Node** inputs) {
+                                   Node* const* inputs) {
   DCHECK_NOT_NULL(schedule_);
   DCHECK_NOT_NULL(current_block_);
   Node* node = MakeNode(op, input_count, inputs);
@@ -364,9 +452,8 @@
   return node;
 }
 
-
 Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
-                                    Node** inputs) {
+                                    Node* const* inputs) {
   // The raw machine assembler nodes do not have effect and control inputs,
   // so we disable checking input counts here.
   return graph()->NewNodeUnchecked(op, input_count, inputs);
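
Phis built by the assembler now always carry a trailing control input (graph()->start() as a placeholder), and AppendPhiInput grows a phi by inserting the new value just before that slot while swapping in a resized operator. A simplified stand-in over a plain vector shows the invariant being preserved (illustrative only, not V8's Node):

    #include <cassert>
    #include <vector>

    // Model: a phi's inputs are value_0..value_{n-1} followed by one control
    // input, which must always stay last.
    struct PhiModel {
      std::vector<int> inputs;
    };

    // Mirrors AppendPhiInput: insert before the trailing control input, so the
    // control slot stays last and the value count grows by one.
    void AppendPhiInput(PhiModel* phi, int new_input) {
      assert(!phi->inputs.empty());
      phi->inputs.insert(phi->inputs.end() - 1, new_input);
    }
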
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 5c232ed..a0cb7a0 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -79,6 +79,9 @@
   Node* Int32Constant(int32_t value) {
     return AddNode(common()->Int32Constant(value));
   }
+  Node* StackSlot(MachineRepresentation rep) {
+    return AddNode(machine()->StackSlot(rep));
+  }
   Node* Int64Constant(int64_t value) {
     return AddNode(common()->Int64Constant(value));
   }
@@ -147,7 +150,7 @@
     return AddNode(machine()->WordEqual(), a, b);
   }
   Node* WordNotEqual(Node* a, Node* b) {
-    return WordBinaryNot(WordEqual(a, b));
+    return Word32BinaryNot(WordEqual(a, b));
   }
   Node* WordNot(Node* a) {
     if (machine()->Is32()) {
@@ -156,13 +159,6 @@
       return Word64Not(a);
     }
   }
-  Node* WordBinaryNot(Node* a) {
-    if (machine()->Is32()) {
-      return Word32BinaryNot(a);
-    } else {
-      return Word64BinaryNot(a);
-    }
-  }
 
   Node* Word32And(Node* a, Node* b) {
     return AddNode(machine()->Word32And(), a, b);
@@ -221,10 +217,9 @@
     return AddNode(machine()->Word64Equal(), a, b);
   }
   Node* Word64NotEqual(Node* a, Node* b) {
-    return Word64BinaryNot(Word64Equal(a, b));
+    return Word32BinaryNot(Word64Equal(a, b));
   }
   Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
-  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
 
   Node* Int32Add(Node* a, Node* b) {
     return AddNode(machine()->Int32Add(), a, b);
@@ -275,6 +270,10 @@
   Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
     return Int32LessThanOrEqual(b, a);
   }
+  Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
+  Node* Uint32GreaterThanOrEqual(Node* a, Node* b) {
+    return Uint32LessThanOrEqual(b, a);
+  }
   Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
 
   Node* Int64Add(Node* a, Node* b) {
@@ -315,6 +314,10 @@
   Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
     return Int64LessThanOrEqual(b, a);
   }
+  Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
+  Node* Uint64GreaterThanOrEqual(Node* a, Node* b) {
+    return Uint64LessThanOrEqual(b, a);
+  }
   Node* Uint64Div(Node* a, Node* b) {
     return AddNode(machine()->Uint64Div(), a, b);
   }
@@ -339,6 +342,19 @@
 
 #undef INTPTR_BINOP
 
+#define UINTPTR_BINOP(prefix, name)                    \
+  Node* UintPtr##name(Node* a, Node* b) {              \
+    return kPointerSize == 8 ? prefix##64##name(a, b)  \
+                             : prefix##32##name(a, b); \
+  }
+
+  UINTPTR_BINOP(Uint, LessThan);
+  UINTPTR_BINOP(Uint, LessThanOrEqual);
+  UINTPTR_BINOP(Uint, GreaterThanOrEqual);
+  UINTPTR_BINOP(Uint, GreaterThan);
+
+#undef UINTPTR_BINOP
+
   Node* Float32Add(Node* a, Node* b) {
     return AddNode(machine()->Float32Add(), a, b);
   }
@@ -363,7 +379,7 @@
     return AddNode(machine()->Float32Equal(), a, b);
   }
   Node* Float32NotEqual(Node* a, Node* b) {
-    return WordBinaryNot(Float32Equal(a, b));
+    return Word32BinaryNot(Float32Equal(a, b));
   }
   Node* Float32LessThan(Node* a, Node* b) {
     return AddNode(machine()->Float32LessThan(), a, b);
@@ -403,7 +419,7 @@
     return AddNode(machine()->Float64Equal(), a, b);
   }
   Node* Float64NotEqual(Node* a, Node* b) {
-    return WordBinaryNot(Float64Equal(a, b));
+    return Word32BinaryNot(Float64Equal(a, b));
   }
   Node* Float64LessThan(Node* a, Node* b) {
     return AddNode(machine()->Float64LessThan(), a, b);
@@ -432,10 +448,11 @@
   Node* ChangeFloat64ToUint32(Node* a) {
     return AddNode(machine()->ChangeFloat64ToUint32(), a);
   }
-  Node* TruncateFloat32ToInt64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+  Node* TruncateFloat32ToInt32(Node* a) {
+    return AddNode(machine()->TruncateFloat32ToInt32(), a);
+  }
+  Node* TruncateFloat32ToUint32(Node* a) {
+    return AddNode(machine()->TruncateFloat32ToUint32(), a);
   }
   Node* TryTruncateFloat32ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
@@ -448,11 +465,6 @@
   Node* TryTruncateFloat64ToInt64(Node* a) {
     return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
   }
-  Node* TruncateFloat32ToUint64(Node* a) {
-    // TODO(ahaas): Remove this function as soon as it is not used anymore in
-    // WebAssembly.
-    return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
-  }
   Node* TryTruncateFloat32ToUint64(Node* a) {
     return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
   }
@@ -479,12 +491,18 @@
   Node* TruncateInt64ToInt32(Node* a) {
     return AddNode(machine()->TruncateInt64ToInt32(), a);
   }
+  Node* RoundInt32ToFloat32(Node* a) {
+    return AddNode(machine()->RoundInt32ToFloat32(), a);
+  }
   Node* RoundInt64ToFloat32(Node* a) {
     return AddNode(machine()->RoundInt64ToFloat32(), a);
   }
   Node* RoundInt64ToFloat64(Node* a) {
     return AddNode(machine()->RoundInt64ToFloat64(), a);
   }
+  Node* RoundUint32ToFloat32(Node* a) {
+    return AddNode(machine()->RoundUint32ToFloat32(), a);
+  }
   Node* RoundUint64ToFloat32(Node* a) {
     return AddNode(machine()->RoundUint64ToFloat32(), a);
   }
@@ -548,6 +566,9 @@
   // Stack operations.
   Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
   Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
+  Node* LoadParentFramePointer() {
+    return AddNode(machine()->LoadParentFramePointer());
+  }
 
   // Parameters.
   Node* Parameter(size_t index);
@@ -568,11 +589,16 @@
   // Call a given call descriptor and the given arguments and frame-state.
   Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
                             Node* frame_state);
+  // Call to a runtime function with zero arguments.
+  Node* CallRuntime0(Runtime::FunctionId function, Node* context);
   // Call to a runtime function with one argument.
   Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
   // Call to a runtime function with two arguments.
   Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
                      Node* context);
+  // Call to a runtime function with three arguments.
+  Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                     Node* arg3, Node* context);
   // Call to a runtime function with four arguments.
   Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
                      Node* arg3, Node* arg4, Node* context);
@@ -602,7 +628,12 @@
   // Tail call to a runtime function with two arguments.
   Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
                          Node* context);
-
+  // Tail call to a runtime function with three arguments.
+  Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* context);
+  // Tail call to a runtime function with four arguments.
+  Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+                         Node* arg3, Node* arg4, Node* context);
 
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
@@ -622,24 +653,26 @@
 
   // Variables.
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
-    return AddNode(common()->Phi(rep, 2), n1, n2);
+    return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
   }
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
-    return AddNode(common()->Phi(rep, 3), n1, n2, n3);
+    return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
   }
   Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
-    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
+    return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
   }
+  Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
+  void AppendPhiInput(Node* phi, Node* new_input);
 
   // ===========================================================================
   // The following generic node creation methods can be used for operators that
   // are not covered by the above utility methods. There should rarely be a need
   // to do that outside of testing though.
 
-  Node* AddNode(const Operator* op, int input_count, Node** inputs);
+  Node* AddNode(const Operator* op, int input_count, Node* const* inputs);
 
   Node* AddNode(const Operator* op) {
-    return AddNode(op, 0, static_cast<Node**>(nullptr));
+    return AddNode(op, 0, static_cast<Node* const*>(nullptr));
   }
 
   template <class... TArgs>
@@ -649,7 +682,7 @@
   }
 
  private:
-  Node* MakeNode(const Operator* op, int input_count, Node** inputs);
+  Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
   BasicBlock* Use(RawMachineLabel* label);
   BasicBlock* EnsureBlock(RawMachineLabel* label);
   BasicBlock* CurrentBlock();
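
The UINTPTR_BINOP macro above follows the same dispatch pattern as the existing INTPTR_BINOP one: each pointer-width comparison forwards to the 32- or 64-bit variant based on kPointerSize. Expanded by hand, UINTPTR_BINOP(Uint, LessThan) produces exactly:

    Node* UintPtrLessThan(Node* a, Node* b) {
      return kPointerSize == 8 ? Uint64LessThan(a, b)
                               : Uint32LessThan(a, b);
    }
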
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 463795e..0b12e14 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -578,7 +578,26 @@
             CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
           }
           if (pred_val.second->succ_vreg != kInvalidVreg) {
-            CHECK_EQ(succ_vreg, pred_val.second->succ_vreg);
+            if (succ_vreg != pred_val.second->succ_vreg) {
+              // When a block introduces 2 identical phis A and B, and both are
+              // operands to other phis C and D, and we optimized the moves
+              // defining A or B such that they now appear in the block defining
+              // A and B, the back propagation will get confused when visiting
+              // upwards from C and D. The operand in the block defining A and B
+              // will be attributed to C (or D, depending on which of these is
+              // visited first).
+              CHECK(IsPhi(pred_val.second->succ_vreg));
+              CHECK(IsPhi(succ_vreg));
+              const PhiData* current_phi = GetPhi(succ_vreg);
+              const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
+              CHECK_EQ(current_phi->operands.size(),
+                       assigned_phi->operands.size());
+              CHECK_EQ(current_phi->definition_rpo,
+                       assigned_phi->definition_rpo);
+              for (size_t i = 0; i < current_phi->operands.size(); ++i) {
+                CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
+              }
+            }
           } else {
             pred_val.second->succ_vreg = succ_vreg;
             block_ids.insert(pred_rpo.ToSize());
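
The relaxed verifier check above accepts a mismatched succ_vreg only when both vregs are phis that are structurally interchangeable: defined in the same block and reading the same operands in the same order. Stated as a standalone predicate over a simplified PhiData (a sketch mirroring the CHECKs, not the verifier's actual type):

    #include <vector>

    struct PhiData {
      int definition_rpo;         // RPO number of the defining block
      std::vector<int> operands;  // operand vregs, in predecessor order
    };

    // Two phis are interchangeable for back propagation iff they are defined
    // in the same block and read exactly the same operands.
    bool StructurallyIdentical(const PhiData& a, const PhiData& b) {
      return a.definition_rpo == b.definition_rpo && a.operands == b.operands;
    }
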
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 232ad9f..02ba1f1 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -104,6 +104,8 @@
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat64:
       return 8;
+    case MachineRepresentation::kSimd128:
+      return 16;
     case MachineRepresentation::kNone:
       break;
   }
@@ -113,6 +115,165 @@
 
 }  // namespace
 
+class LiveRangeBound {
+ public:
+  explicit LiveRangeBound(LiveRange* range, bool skip)
+      : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
+    DCHECK(!range->IsEmpty());
+  }
+
+  bool CanCover(LifetimePosition position) {
+    return start_ <= position && position < end_;
+  }
+
+  LiveRange* const range_;
+  const LifetimePosition start_;
+  const LifetimePosition end_;
+  const bool skip_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+  LiveRange* cur_cover_;
+  LiveRange* pred_cover_;
+};
+
+
+class LiveRangeBoundArray {
+ public:
+  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+  bool ShouldInitialize() { return start_ == nullptr; }
+
+  void Initialize(Zone* zone, TopLevelLiveRange* range) {
+    length_ = range->GetChildCount();
+
+    start_ = zone->NewArray<LiveRangeBound>(length_);
+    LiveRangeBound* curr = start_;
+    // Normally, spilled ranges do not need connecting moves, because the spill
+    // location has been assigned at definition. For ranges spilled in deferred
+    // blocks, that is not the case, so we need to connect the spilled children.
+    for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+      new (curr) LiveRangeBound(i, i->spilled());
+    }
+  }
+
+  LiveRangeBound* Find(const LifetimePosition position) const {
+    size_t left_index = 0;
+    size_t right_index = length_;
+    while (true) {
+      size_t current_index = left_index + (right_index - left_index) / 2;
+      DCHECK(right_index > current_index);
+      LiveRangeBound* bound = &start_[current_index];
+      if (bound->start_ <= position) {
+        if (position < bound->end_) return bound;
+        DCHECK(left_index < current_index);
+        left_index = current_index;
+      } else {
+        right_index = current_index;
+      }
+    }
+  }
+
+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    LifetimePosition pred_end =
+        LifetimePosition::InstructionFromInstructionIndex(
+            pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
+        succ->first_instruction_index());
+    return Find(succ_start);
+  }
+
+  bool FindConnectableSubranges(const InstructionBlock* block,
+                                const InstructionBlock* pred,
+                                FindResult* result) const {
+    LifetimePosition pred_end =
+        LifetimePosition::InstructionFromInstructionIndex(
+            pred->last_instruction_index());
+    LiveRangeBound* bound = Find(pred_end);
+    result->pred_cover_ = bound->range_;
+    LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
+        block->first_instruction_index());
+
+    if (bound->CanCover(cur_start)) {
+      // Both blocks are covered by the same range, so there is nothing to
+      // connect.
+      return false;
+    }
+    bound = Find(cur_start);
+    if (bound->skip_) {
+      return false;
+    }
+    result->cur_cover_ = bound->range_;
+    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+    return (result->cur_cover_ != result->pred_cover_);
+  }
+
+ private:
+  size_t length_;
+  LiveRangeBound* start_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
+class LiveRangeFinder {
+ public:
+  explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
+      : data_(data),
+        bounds_length_(static_cast<int>(data_->live_ranges().size())),
+        bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
+        zone_(zone) {
+    for (int i = 0; i < bounds_length_; ++i) {
+      new (&bounds_[i]) LiveRangeBoundArray();
+    }
+  }
+
+  LiveRangeBoundArray* ArrayFor(int operand_index) {
+    DCHECK(operand_index < bounds_length_);
+    TopLevelLiveRange* range = data_->live_ranges()[operand_index];
+    DCHECK(range != nullptr && !range->IsEmpty());
+    LiveRangeBoundArray* array = &bounds_[operand_index];
+    if (array->ShouldInitialize()) {
+      array->Initialize(zone_, range);
+    }
+    return array;
+  }
+
+ private:
+  const RegisterAllocationData* const data_;
+  const int bounds_length_;
+  LiveRangeBoundArray* const bounds_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+
+typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
+
+
+struct DelayedInsertionMapCompare {
+  bool operator()(const DelayedInsertionMapKey& a,
+                  const DelayedInsertionMapKey& b) const {
+    if (a.first == b.first) {
+      return a.second.Compare(b.second);
+    }
+    return a.first < b.first;
+  }
+};
+
+
+typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
+                DelayedInsertionMapCompare> DelayedInsertionMap;
+
 
 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
                          void* hint, UsePositionHintType hint_type)
@@ -734,51 +895,13 @@
       gap_index, operand, spill_move_insertion_locations_);
 }
 
-
-bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
-    InstructionSequence* code, const InstructionOperand& spill_operand) {
-  if (!IsSpilledOnlyInDeferredBlocks()) return false;
-
-  TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
-  // If we have ranges that aren't spilled but require the operand on the stack,
-  // make sure we insert the spill.
-  for (const LiveRange* child = this; child != nullptr; child = child->next()) {
-    if (!child->spilled() &&
-        child->NextSlotPosition(child->Start()) != nullptr) {
-      Instruction* instr =
-          code->InstructionAt(child->Start().ToInstructionIndex());
-      // Insert spill at the end to let live range connections happen at START.
-      ParallelMove* move =
-          instr->GetOrCreateParallelMove(Instruction::END, code->zone());
-      InstructionOperand assigned = child->GetAssignedOperand();
-      if (TopLevel()->has_slot_use()) {
-        bool found = false;
-        for (MoveOperands* move_op : *move) {
-          if (move_op->IsEliminated()) continue;
-          if (move_op->source().Equals(assigned) &&
-              move_op->destination().Equals(spill_operand)) {
-            found = true;
-            break;
-          }
-        }
-        if (found) continue;
-      }
-
-      move->AddMove(assigned, spill_operand);
-    }
-  }
-
-  return true;
-}
-
-
 void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
                                          const InstructionOperand& op,
                                          bool might_be_duplicated) {
-  DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+  DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
   Zone* zone = sequence->zone();
 
-  for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+  for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
        to_spill != nullptr; to_spill = to_spill->next) {
     Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
     ParallelMove* move =
@@ -2321,12 +2444,15 @@
 
   const InstructionBlock* block = end_block;
   // Find header of outermost loop.
-  // TODO(titzer): fix redundancy below.
-  while (GetContainingLoop(code(), block) != nullptr &&
-         GetContainingLoop(code(), block)->rpo_number().ToInt() >
-             start_block->rpo_number().ToInt()) {
-    block = GetContainingLoop(code(), block);
-  }
+  do {
+    const InstructionBlock* loop = GetContainingLoop(code(), block);
+    if (loop == nullptr ||
+        loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
+      // No more loops or loop starts before the lifetime start.
+      break;
+    }
+    block = loop;
+  } while (true);
 
   // We did not find any suitable outer loop. Split at the latest possible
   // position unless end_block is a loop header itself.
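
The rewritten loop above also resolves the old TODO(titzer) about redundancy: GetContainingLoop is now evaluated once per iteration instead of twice. The shape of the search in isolation, under stand-in types (Block and GetLoop are simplified, not the V8 API):

struct Block {
  int rpo;             // reverse-postorder number of the block
  const Block* loop;   // immediately containing loop header, or nullptr
};

// Stand-in for GetContainingLoop(code(), block).
const Block* GetLoop(const Block* b) { return b->loop; }

// Walk outward through enclosing loops; stop at the outermost loop header
// that still starts after the range's start block.
const Block* OutermostLoop(const Block* block, int start_rpo) {
  while (true) {
    const Block* loop = GetLoop(block);  // computed once per iteration
    if (loop == nullptr || loop->rpo <= start_rpo) break;
    block = loop;
  }
  return block;
}
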
@@ -2965,7 +3091,7 @@
       }
     } else {
       TopLevelLiveRange::SpillMoveInsertionList* spills =
-          range->spill_move_insertion_locations();
+          range->GetSpillMoveInsertionLocations();
       DCHECK_NOT_NULL(spills);
       for (; spills != nullptr; spills = spills->next) {
         code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -3032,12 +3158,10 @@
       // connecting move when a successor child range is spilled - because the
       // spilled range picks up its value from the slot which was assigned at
       // definition. For ranges that are determined to spill only in deferred
-      // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
-      // moves between ranges. Because of how the ranges are split around
-      // deferred blocks, this amounts to spilling and filling inside such
-      // blocks.
-      if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
-                                                    spill_operand)) {
+      // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
+      // where a spill operand is expected, and then finalize by inserting the
+      // spills in the deferred blocks' dominators.
+      if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
         // Spill at definition if the range isn't spilled only in deferred
         // blocks.
         top_range->CommitSpillMoves(
@@ -3188,171 +3312,6 @@
 }
 
 
-namespace {
-
-class LiveRangeBound {
- public:
-  explicit LiveRangeBound(const LiveRange* range, bool skip)
-      : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
-    DCHECK(!range->IsEmpty());
-  }
-
-  bool CanCover(LifetimePosition position) {
-    return start_ <= position && position < end_;
-  }
-
-  const LiveRange* const range_;
-  const LifetimePosition start_;
-  const LifetimePosition end_;
-  const bool skip_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
-};
-
-
-struct FindResult {
-  const LiveRange* cur_cover_;
-  const LiveRange* pred_cover_;
-};
-
-
-class LiveRangeBoundArray {
- public:
-  LiveRangeBoundArray() : length_(0), start_(nullptr) {}
-
-  bool ShouldInitialize() { return start_ == nullptr; }
-
-  void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
-    length_ = range->GetChildCount();
-
-    start_ = zone->NewArray<LiveRangeBound>(length_);
-    LiveRangeBound* curr = start_;
-    // Normally, spilled ranges do not need connecting moves, because the spill
-    // location has been assigned at definition. For ranges spilled in deferred
-    // blocks, that is not the case, so we need to connect the spilled children.
-    bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
-    for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
-      new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
-    }
-  }
-
-  LiveRangeBound* Find(const LifetimePosition position) const {
-    size_t left_index = 0;
-    size_t right_index = length_;
-    while (true) {
-      size_t current_index = left_index + (right_index - left_index) / 2;
-      DCHECK(right_index > current_index);
-      LiveRangeBound* bound = &start_[current_index];
-      if (bound->start_ <= position) {
-        if (position < bound->end_) return bound;
-        DCHECK(left_index < current_index);
-        left_index = current_index;
-      } else {
-        right_index = current_index;
-      }
-    }
-  }
-
-  LiveRangeBound* FindPred(const InstructionBlock* pred) {
-    LifetimePosition pred_end =
-        LifetimePosition::InstructionFromInstructionIndex(
-            pred->last_instruction_index());
-    return Find(pred_end);
-  }
-
-  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
-    LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
-        succ->first_instruction_index());
-    return Find(succ_start);
-  }
-
-  bool FindConnectableSubranges(const InstructionBlock* block,
-                                const InstructionBlock* pred,
-                                FindResult* result) const {
-    LifetimePosition pred_end =
-        LifetimePosition::InstructionFromInstructionIndex(
-            pred->last_instruction_index());
-    LiveRangeBound* bound = Find(pred_end);
-    result->pred_cover_ = bound->range_;
-    LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
-        block->first_instruction_index());
-
-    if (bound->CanCover(cur_start)) {
-      // Both blocks are covered by the same range, so there is nothing to
-      // connect.
-      return false;
-    }
-    bound = Find(cur_start);
-    if (bound->skip_) {
-      return false;
-    }
-    result->cur_cover_ = bound->range_;
-    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
-    return (result->cur_cover_ != result->pred_cover_);
-  }
-
- private:
-  size_t length_;
-  LiveRangeBound* start_;
-
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
-};
-
-
-class LiveRangeFinder {
- public:
-  explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
-      : data_(data),
-        bounds_length_(static_cast<int>(data_->live_ranges().size())),
-        bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
-        zone_(zone) {
-    for (int i = 0; i < bounds_length_; ++i) {
-      new (&bounds_[i]) LiveRangeBoundArray();
-    }
-  }
-
-  LiveRangeBoundArray* ArrayFor(int operand_index) {
-    DCHECK(operand_index < bounds_length_);
-    TopLevelLiveRange* range = data_->live_ranges()[operand_index];
-    DCHECK(range != nullptr && !range->IsEmpty());
-    LiveRangeBoundArray* array = &bounds_[operand_index];
-    if (array->ShouldInitialize()) {
-      array->Initialize(zone_, range);
-    }
-    return array;
-  }
-
- private:
-  const RegisterAllocationData* const data_;
-  const int bounds_length_;
-  LiveRangeBoundArray* const bounds_;
-  Zone* const zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
-};
-
-
-typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
-
-
-struct DelayedInsertionMapCompare {
-  bool operator()(const DelayedInsertionMapKey& a,
-                  const DelayedInsertionMapKey& b) const {
-    if (a.first == b.first) {
-      return a.second.Compare(b.second);
-    }
-    return a.first < b.first;
-  }
-};
-
-
-typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
-                DelayedInsertionMapCompare> DelayedInsertionMap;
-
-}  // namespace
-
-
 LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
     : data_(data) {}
 
@@ -3383,6 +3342,41 @@
         InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
         InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
         if (pred_op.Equals(cur_op)) continue;
+        if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
+          // We're doing a reload.
+          // We can skip the reload if:
+          // 1) there's no register use in this block, and
+          // 2) the range ends before the block does, and
+          // 3) there is no successor, or the successor is spilled.
+          LifetimePosition block_start =
+              LifetimePosition::GapFromInstructionIndex(block->code_start());
+          LifetimePosition block_end =
+              LifetimePosition::GapFromInstructionIndex(block->code_end());
+          const LiveRange* current = result.cur_cover_;
+          const LiveRange* successor = current->next();
+          if (current->End() < block_end &&
+              (successor == nullptr || successor->spilled())) {
+            // Verify point 1: no register use. We can go to the end of the
+            // range, since it's all within the block.
+
+            bool uses_reg = false;
+            for (const UsePosition* use = current->NextUsePosition(block_start);
+                 use != nullptr; use = use->next()) {
+              if (use->operand()->IsAnyRegister()) {
+                uses_reg = true;
+                break;
+              }
+            }
+            if (!uses_reg) continue;
+          }
+          if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+              pred_block->IsDeferred()) {
+            // The spill location should be defined in pred_block, so add
+            // pred_block to the list of blocks requiring a spill operand.
+            current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
+                pred_block->rpo_number().ToInt());
+          }
+        }
         int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
         USE(move_loc);
         DCHECK_IMPLIES(
@@ -3393,6 +3387,16 @@
       iterator.Advance();
     }
   }
+
+  // At this stage, we have collected the blocks that need a spill operand
+  // from ConnectRanges and from ResolveControlFlow. Time to commit the spills
+  // for deferred blocks.
+  for (TopLevelLiveRange* top : data()->live_ranges()) {
+    if (top == nullptr || top->IsEmpty() ||
+        !top->IsSpilledOnlyInDeferredBlocks())
+      continue;
+    CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
+  }
 }
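
For clarity, the reload-elision test added to ResolveControlFlow above can be restated as a single predicate. This is a sketch over simplified stand-in types (UsePos and Range are not the V8 classes), with the numbering matching the comment in the diff: the reload is skippable only when the covering range dies inside the block, any successor is spilled, and no use in the block needs a register.

// Simplified stand-ins; not the V8 types.
struct UsePos {
  bool is_register_use;
  const UsePos* next;
};

struct Range {
  int end;                  // position where this child range dies
  const Range* successor;   // next child range, or nullptr
  bool spilled;
  const UsePos* first_use;  // uses at or after the block start
};

// True when the reload at the top of the block can be skipped.
bool CanSkipReload(const Range& r, int block_end) {
  if (r.end >= block_end) return false;                    // (2) must die in block
  if (r.successor && !r.successor->spilled) return false;  // (3) successor spilled
  for (const UsePos* u = r.first_use; u != nullptr; u = u->next) {
    if (u->is_register_use) return false;                  // (1) no register use
  }
  return true;
}
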
 
 
@@ -3430,7 +3434,7 @@
       LifetimePosition pos = second_range->Start();
       // Add gap move if the two live ranges touch and there is no block
       // boundary.
-      if (!connect_spilled && second_range->spilled()) continue;
+      if (second_range->spilled()) continue;
       if (first_range->End() != pos) continue;
       if (data()->IsBlockBoundary(pos) &&
           !CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
@@ -3442,6 +3446,16 @@
       bool delay_insertion = false;
       Instruction::GapPosition gap_pos;
       int gap_index = pos.ToInstructionIndex();
+      if (connect_spilled && !prev_operand.IsAnyRegister() &&
+          cur_operand.IsAnyRegister()) {
+        const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
+        DCHECK(block->IsDeferred());
+        // We are performing a reload in this block, so the spill operand
+        // must be defined here.
+        top_range->GetListOfBlocksRequiringSpillOperands()->Add(
+            block->rpo_number().ToInt());
+      }
+
       if (pos.IsGapPosition()) {
         gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
       } else {
@@ -3452,7 +3466,7 @@
         }
         gap_pos = delay_insertion ? Instruction::END : Instruction::START;
       }
-      // Fills or spills for spilled in deferred blocks ranges must happen
+      // Reloads or spills for ranges spilled in deferred blocks must happen
       // only in deferred blocks.
       DCHECK_IMPLIES(
           connect_spilled &&
@@ -3503,6 +3517,73 @@
 }
 
 
+void LiveRangeConnector::CommitSpillsInDeferredBlocks(
+    TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
+  DCHECK(range->IsSpilledOnlyInDeferredBlocks());
+  DCHECK(!range->spilled());
+
+  InstructionSequence* code = data()->code();
+  InstructionOperand spill_operand = range->GetSpillRangeOperand();
+
+  TRACE("Live Range %d will be spilled only in deferred blocks.\n",
+        range->vreg());
+  // If we have ranges that aren't spilled but require the operand on the stack,
+  // make sure we insert the spill.
+  for (const LiveRange* child = range; child != nullptr;
+       child = child->next()) {
+    for (const UsePosition* pos = child->first_pos(); pos != nullptr;
+         pos = pos->next()) {
+      if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
+        continue;
+      range->AddBlockRequiringSpillOperand(
+          code->GetInstructionBlock(pos->pos().ToInstructionIndex())
+              ->rpo_number());
+    }
+  }
+
+  ZoneQueue<int> worklist(temp_zone);
+
+  for (BitVector::Iterator iterator(
+           range->GetListOfBlocksRequiringSpillOperands());
+       !iterator.Done(); iterator.Advance()) {
+    worklist.push(iterator.Current());
+  }
+
+  // Seek the deferred blocks that dominate locations requiring spill operands,
+  // and spill there. We only need to spill at the start of such blocks.
+  BitVector done_blocks(
+      range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
+  while (!worklist.empty()) {
+    int block_id = worklist.front();
+    worklist.pop();
+    if (done_blocks.Contains(block_id)) continue;
+    done_blocks.Add(block_id);
+    const InstructionBlock* spill_block =
+        code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+
+    for (const RpoNumber& pred : spill_block->predecessors()) {
+      const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
+
+      if (pred_block->IsDeferred()) {
+        worklist.push(pred_block->rpo_number().ToInt());
+      } else {
+        LifetimePosition pred_end =
+            LifetimePosition::InstructionFromInstructionIndex(
+                pred_block->last_instruction_index());
+
+        LiveRangeBound* bound = array->Find(pred_end);
+
+        InstructionOperand pred_op = bound->range_->GetAssignedOperand();
+
+        data()->AddGapMove(spill_block->first_instruction_index(),
+                           Instruction::GapPosition::START, pred_op,
+                           spill_operand);
+      }
+    }
+  }
+}
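
CommitSpillsInDeferredBlocks is a standard backwards worklist traversal: seed it with every block known to need the spill operand, walk predecessor edges, keep expanding while the predecessors are still deferred, and emit the spill move exactly on edges entering from non-deferred code, deduplicating with a bit set. A minimal sketch of that traversal shape (Block, seeds and SpillAt are assumptions, not the V8 API):

#include <queue>
#include <vector>

struct Block {
  bool deferred;
  std::vector<int> predecessors;
};

// Visit every deferred block reachable backwards from the seed blocks; call
// SpillAt(block, pred) for each edge entering from non-deferred code.
template <typename SpillFn>
void CommitSpills(const std::vector<Block>& blocks,
                  const std::vector<int>& seeds, SpillFn SpillAt) {
  std::vector<bool> done(blocks.size(), false);  // plays the BitVector role
  std::queue<int> worklist;
  for (int s : seeds) worklist.push(s);
  while (!worklist.empty()) {
    int id = worklist.front();
    worklist.pop();
    if (done[id]) continue;
    done[id] = true;
    for (int pred : blocks[id].predecessors) {
      if (blocks[pred].deferred) {
        worklist.push(pred);  // keep walking inside the deferred region
      } else {
        SpillAt(id, pred);    // spill on entry from non-deferred code
      }
    }
  }
}

Invoking it as CommitSpills(blocks, seeds, [&](int b, int p) { /* add gap move */ }) mirrors the AddGapMove call in the original.
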
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index b96a43c..38fad05 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -579,14 +579,17 @@
   // and instead let the LiveRangeConnector perform the spills within the
   // deferred blocks. If so, we insert here spills for non-spilled ranges
   // with slot use positions.
-  void MarkSpilledInDeferredBlock() {
+  void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
     spill_start_index_ = -1;
     spilled_in_deferred_blocks_ = true;
     spill_move_insertion_locations_ = nullptr;
+    list_of_blocks_requiring_spill_operands_ =
+        new (zone) BitVector(total_block_count, zone);
   }
 
-  bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
-                                     const InstructionOperand& spill_operand);
+  void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
+                                   const InstructionOperand& spill_operand,
+                                   BitVector* necessary_spill_points);
 
   TopLevelLiveRange* splintered_from() const { return splintered_from_; }
   bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -617,7 +620,8 @@
 
   struct SpillMoveInsertionList;
 
-  SpillMoveInsertionList* spill_move_insertion_locations() const {
+  SpillMoveInsertionList* GetSpillMoveInsertionLocations() const {
+    DCHECK(!IsSpilledOnlyInDeferredBlocks());
     return spill_move_insertion_locations_;
   }
   TopLevelLiveRange* splinter() const { return splinter_; }
@@ -634,6 +638,16 @@
   void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
   bool has_preassigned_slot() const { return has_preassigned_slot_; }
 
+  void AddBlockRequiringSpillOperand(RpoNumber block_id) {
+    DCHECK(IsSpilledOnlyInDeferredBlocks());
+    GetListOfBlocksRequiringSpillOperands()->Add(block_id.ToInt());
+  }
+
+  BitVector* GetListOfBlocksRequiringSpillOperands() const {
+    DCHECK(IsSpilledOnlyInDeferredBlocks());
+    return list_of_blocks_requiring_spill_operands_;
+  }
+
  private:
   void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
 
@@ -650,7 +664,12 @@
     InstructionOperand* spill_operand_;
     SpillRange* spill_range_;
   };
-  SpillMoveInsertionList* spill_move_insertion_locations_;
+
+  union {
+    SpillMoveInsertionList* spill_move_insertion_locations_;
+    BitVector* list_of_blocks_requiring_spill_operands_;
+  };
+
   // TODO(mtrofin): generalize spilling after definition, currently specialized
   // just for spill in a single deferred block.
   bool spilled_in_deferred_blocks_;
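
The union introduced above is safe because the two payloads are mutually exclusive: a range records either spill-move insertion points or, when spilled only in deferred blocks, the bit vector of blocks requiring spill operands, and spilled_in_deferred_blocks_ already discriminates the two (the DCHECKs in the accessors enforce it). A miniature of the same tagged-union pattern, with opaque stand-in payload types:

#include <cassert>

struct SpillMoves;  // opaque stand-ins for the two payload types
struct BlockBits;

class RangeInfo {
 public:
  void UseSpillMoves(SpillMoves* m) { deferred_ = false; moves_ = m; }
  void UseBlockBits(BlockBits* b) { deferred_ = true; bits_ = b; }

  SpillMoves* moves() const {
    assert(!deferred_);  // valid only in the non-deferred configuration
    return moves_;
  }
  BlockBits* bits() const {
    assert(deferred_);   // valid only when spilled in deferred blocks
    return bits_;
  }

 private:
  union {
    SpillMoves* moves_ = nullptr;
    BlockBits* bits_;
  };
  bool deferred_ = false;  // discriminates the live union member
};
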
@@ -1125,6 +1144,7 @@
 };
 
 
+class LiveRangeBoundArray;
 // Insert moves of the form
 //
 //          Operand(child_(k+1)) = Operand(child_k)
@@ -1157,6 +1177,10 @@
                          const InstructionBlock* pred,
                          const InstructionOperand& pred_op);
 
+  void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range,
+                                    LiveRangeBoundArray* array,
+                                    Zone* temp_zone);
+
   RegisterAllocationData* const data_;
 
   DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 5dab60f..2f7720b 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -97,7 +97,6 @@
 
 namespace {
 
-// TODO(titzer): should Word64 also be implicitly convertable to others?
 bool IsWord(MachineRepresentation rep) {
   return rep == MachineRepresentation::kWord8 ||
          rep == MachineRepresentation::kWord16 ||
@@ -146,6 +145,9 @@
       return GetWord32RepresentationFor(node, output_rep, output_type);
     case MachineRepresentation::kWord64:
       return GetWord64RepresentationFor(node, output_rep, output_type);
+    case MachineRepresentation::kSimd128:  // Fall through.
+      // TODO(bbudge) Handle conversions between tagged and untagged.
+      break;
     case MachineRepresentation::kNone:
       return node;
   }
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 653fea8..ed7fe9d 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -142,6 +142,7 @@
     return UseInfo::TruncatingWord32();
     case MachineRepresentation::kBit:
       return UseInfo::Bool();
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       break;
   }
@@ -199,6 +200,9 @@
     case MachineRepresentation::kFloat64:
       return r2 == MachineRepresentation::kFloat64 ||
              r2 == MachineRepresentation::kTagged;
+    case MachineRepresentation::kSimd128:
+      return r2 == MachineRepresentation::kSimd128 ||
+             r2 == MachineRepresentation::kTagged;
     case MachineRepresentation::kTagged:
       return r2 == MachineRepresentation::kTagged;
   }
@@ -1189,10 +1193,18 @@
                   NodeOutputInfo(access.machine_type().representation(),
                                  NodeProperties::GetType(node));
             } else {
+              if (access.machine_type().representation() !=
+                  MachineRepresentation::kFloat64) {
+                // TODO(bmeurer): See comment on abort_compilation_.
+                if (lower()) lowering->abort_compilation_ = true;
+              }
               output_info = NodeOutputInfo::Float64();
             }
           }
         } else {
+          // TODO(bmeurer): See comment on abort_compilation_.
+          if (lower()) lowering->abort_compilation_ = true;
+
           // If undefined is not truncated away, we need to have the tagged
           // representation.
           output_info = NodeOutputInfo::AnyTagged();
@@ -1237,13 +1249,16 @@
       case IrOpcode::kObjectIsNumber: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, NodeOutputInfo::Bool());
-        if (lower()) lowering->DoObjectIsNumber(node);
+        break;
+      }
+      case IrOpcode::kObjectIsReceiver: {
+        ProcessInput(node, 0, UseInfo::AnyTagged());
+        SetOutput(node, NodeOutputInfo::Bool());
         break;
       }
       case IrOpcode::kObjectIsSmi: {
         ProcessInput(node, 0, UseInfo::AnyTagged());
         SetOutput(node, NodeOutputInfo::Bool());
-        if (lower()) lowering->DoObjectIsSmi(node);
         break;
       }
 
@@ -1388,6 +1403,7 @@
       case IrOpcode::kFloat64RoundDown:
       case IrOpcode::kFloat64RoundTruncate:
       case IrOpcode::kFloat64RoundTiesAway:
+      case IrOpcode::kFloat64RoundUp:
         return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
       case IrOpcode::kFloat64Equal:
       case IrOpcode::kFloat64LessThan:
@@ -1402,6 +1418,7 @@
                           NodeOutputInfo::Float64());
       case IrOpcode::kLoadStackPointer:
       case IrOpcode::kLoadFramePointer:
+      case IrOpcode::kLoadParentFramePointer:
         return VisitLeaf(node, NodeOutputInfo::Pointer());
       case IrOpcode::kStateValues:
         VisitStateValues(node);
@@ -1579,42 +1596,6 @@
 }
 
 
-void SimplifiedLowering::DoObjectIsNumber(Node* node) {
-  Node* input = NodeProperties::GetValueInput(node, 0);
-  // TODO(bmeurer): Optimize somewhat based on input type.
-  Node* check =
-      graph()->NewNode(machine()->WordEqual(),
-                       graph()->NewNode(machine()->WordAnd(), input,
-                                        jsgraph()->IntPtrConstant(kSmiTagMask)),
-                       jsgraph()->IntPtrConstant(kSmiTag));
-  Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->Int32Constant(1);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(
-      machine()->WordEqual(),
-      graph()->NewNode(
-          machine()->Load(MachineType::AnyTagged()), input,
-          jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
-          graph()->start(), if_false),
-      jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
-  Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->ReplaceInput(0, vtrue);
-  node->AppendInput(graph()->zone(), vfalse);
-  node->AppendInput(graph()->zone(), control);
-  NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-}
-
-
-void SimplifiedLowering::DoObjectIsSmi(Node* node) {
-  node->ReplaceInput(0,
-                     graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
-                                      jsgraph()->IntPtrConstant(kSmiTagMask)));
-  node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
-  NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
 Node* SimplifiedLowering::StringComparison(Node* node) {
   Operator::Properties properties = node->op()->properties();
   Callable callable = CodeFactory::StringCompare(isolate());
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index f9410f8..358bd97 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -36,13 +36,16 @@
   void DoLoadBuffer(Node* node, MachineRepresentation rep,
                     RepresentationChanger* changer);
   void DoStoreBuffer(Node* node);
-  void DoObjectIsNumber(Node* node);
-  void DoObjectIsSmi(Node* node);
   void DoShift(Node* node, Operator const* op, Type* rhs_type);
   void DoStringEqual(Node* node);
   void DoStringLessThan(Node* node);
   void DoStringLessThanOrEqual(Node* node);
 
+  // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
+  // typing hack to support the gigantic "asm.js should be fast without proper
+  // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
+  bool abort_compilation_ = false;
+
  private:
   JSGraph* const jsgraph_;
   Zone* const zone_;
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 1eaa287..c7abe9c 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -7,7 +7,7 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -187,6 +187,7 @@
   V(ChangeBoolToBit, Operator::kNoProperties, 1)         \
   V(ChangeBitToBool, Operator::kNoProperties, 1)         \
   V(ObjectIsNumber, Operator::kNoProperties, 1)          \
+  V(ObjectIsReceiver, Operator::kNoProperties, 1)        \
   V(ObjectIsSmi, Operator::kNoProperties, 1)
 
 #define NO_THROW_OP_LIST(V)                 \
@@ -253,7 +254,6 @@
 
 
 const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
-  // TODO(titzer): What about the type parameter?
   return new (zone()) Operator(IrOpcode::kReferenceEqual,
                                Operator::kCommutative | Operator::kPure,
                                "ReferenceEqual", 2, 0, 0, 1, 0, 0);
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 3821a6d..2ed4b5f 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -15,10 +15,7 @@
 namespace internal {
 
 // Forward declarations.
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
+class Type;
 class Zone;
 
 
@@ -168,6 +165,7 @@
   const Operator* ChangeBitToBool();
 
   const Operator* ObjectIsNumber();
+  const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
 
   const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index c1f816d..9679513 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -29,10 +29,8 @@
   Typer* const typer_;
 };
 
-
 Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
-             CompilationDependencies* dependencies,
-             Type::FunctionType* function_type)
+             CompilationDependencies* dependencies, FunctionType* function_type)
     : isolate_(isolate),
       graph_(graph),
       flags_(flags),
@@ -243,11 +241,14 @@
   static Type* NumberToInt32(Type*, Typer*);
   static Type* NumberToUint32(Type*, Typer*);
 
-  static Type* JSAddRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSSubtractRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSMultiplyRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
-  static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
+  static Type* ObjectIsNumber(Type*, Typer*);
+  static Type* ObjectIsReceiver(Type*, Typer*);
+  static Type* ObjectIsSmi(Type*, Typer*);
+
+  static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
+  static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
 
   static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
 
@@ -508,14 +509,36 @@
 }
 
 
+// Type checks.
+
+
+Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
+  if (type->Is(Type::Number())) return t->singleton_true_;
+  if (!type->Maybe(Type::Number())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsReceiver(Type* type, Typer* t) {
+  if (type->Is(Type::Receiver())) return t->singleton_true_;
+  if (!type->Maybe(Type::Receiver())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
+  if (type->Is(Type::TaggedSigned())) return t->singleton_true_;
+  if (type->Is(Type::TaggedPointer())) return t->singleton_false_;
+  return Type::Boolean();
+}
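
The three type-check typers above share one three-valued scheme: if the input type proves membership, return the singleton true type; if it refutes it, return singleton false; otherwise fall back to Boolean. (ObjectIsSmi refutes via TaggedPointer rather than via non-overlap, but the shape is the same.) A toy model of that decision table, treating a type as a plain set of values rather than V8's type lattice:

#include <set>

using Type = std::set<int>;  // toy: a type is just a set of values

bool Is(const Type& a, const Type& b) {  // a is a subtype of b
  for (int v : a)
    if (!b.count(v)) return false;
  return true;
}

bool Maybe(const Type& a, const Type& b) {  // a and b overlap
  for (int v : a)
    if (b.count(v)) return true;
  return false;
}

enum class Outcome { kSingletonTrue, kSingletonFalse, kBoolean };

// The shared shape of ObjectIsNumber / ObjectIsReceiver / ObjectIsSmi.
Outcome ObjectIs(const Type& input, const Type& tested) {
  if (Is(input, tested)) return Outcome::kSingletonTrue;       // provably inside
  if (!Maybe(input, tested)) return Outcome::kSingletonFalse;  // provably disjoint
  return Outcome::kBoolean;                                    // either way
}
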
+
+
 // -----------------------------------------------------------------------------
 
 
 // Control operators.
 
-
-Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(zone()); }
-
+Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
 
@@ -524,7 +547,7 @@
 
 
 Type* Typer::Visitor::TypeParameter(Node* node) {
-  if (Type::FunctionType* function_type = typer_->function_type()) {
+  if (FunctionType* function_type = typer_->function_type()) {
     int const index = ParameterIndexOf(node->op());
     if (index >= 0 && index < function_type->Arity()) {
       return function_type->Parameter(index);
@@ -578,7 +601,7 @@
 
 
 Type* Typer::Visitor::TypeExternalConstant(Node* node) {
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
 
@@ -627,22 +650,15 @@
 
 Type* Typer::Visitor::TypeFrameState(Node* node) {
   // TODO(rossberg): Ideally FrameState wouldn't have a value output.
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeStateValues(Node* node) { return Type::Internal(); }
 
-Type* Typer::Visitor::TypeStateValues(Node* node) {
-  return Type::Internal(zone());
-}
-
-
-Type* Typer::Visitor::TypeObjectState(Node* node) {
-  return Type::Internal(zone());
-}
-
+Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
-  return Type::Internal(zone());
+  return Type::Internal();
 }
 
 
@@ -650,7 +666,12 @@
 
 
 Type* Typer::Visitor::TypeProjection(Node* node) {
-  // TODO(titzer): use the output type of the input to determine the bounds.
+  Type* const type = Operand(node, 0);
+  if (type->Is(Type::None())) return Type::None();
+  int const index = static_cast<int>(ProjectionIndexOf(node->op()));
+  if (type->IsTuple() && index < type->AsTuple()->Arity()) {
+    return type->AsTuple()->Element(index);
+  }
   return Type::Any();
 }
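
TypeProjection can now be precise because tuple types exist (see TypeJSForInPrepare below, which produces one): the projection's type is the tuple element at the projection index, with None propagated and Any as the conservative fallback. A toy version under assumed shapes (this Type struct is an illustration, not the V8 API):

#include <vector>

struct Type {
  bool is_none = false;
  std::vector<Type> tuple;  // non-empty iff this is a tuple type
};

Type Any() { return {}; }
Type None() { Type t; t.is_none = true; return t; }

// Project element `index` out of `input`, mirroring TypeProjection.
Type ProjectType(const Type& input, size_t index) {
  if (input.is_none) return None();  // None stays None
  if (!input.tuple.empty() && index < input.tuple.size())
    return input.tuple[index];       // precise element type
  return Any();                      // conservative fallback
}
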
 
@@ -950,9 +971,7 @@
   return x == 0 ? 0 : x;  // -0 -> 0
 }
 
-
-Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
-                                  Typer* t) {
+Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
   double results[4];
   results[0] = lhs->Min() + rhs->Min();
   results[1] = lhs->Min() + rhs->Max();
@@ -998,9 +1017,8 @@
   return Type::Number();
 }
 
-
-Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
-                                       Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
+                                       Typer* t) {
   double results[4];
   results[0] = lhs->Min() - rhs->Min();
   results[1] = lhs->Min() - rhs->Max();
@@ -1037,41 +1055,38 @@
 }
 
 
-Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
-                                       Type::RangeType* rhs, Typer* t) {
-  double results[4];
-  double lmin = lhs->Min();
-  double lmax = lhs->Max();
-  double rmin = rhs->Min();
-  double rmax = rhs->Max();
-  results[0] = lmin * rmin;
-  results[1] = lmin * rmax;
-  results[2] = lmax * rmin;
-  results[3] = lmax * rmax;
-  // If the result may be nan, we give up on calculating a precise type, because
-  // the discontinuity makes it too complicated.  Note that even if none of the
-  // "results" above is nan, the actual result may still be, so we have to do a
-  // different check:
-  bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
-                    (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
-                   (rhs->Maybe(t->cache_.kSingletonZero) &&
-                    (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
-  if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
-  bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
-                         (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
-  Type* range =
-      Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
-  return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
-                         : range;
-}
-
-
 Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
   lhs = Rangify(ToNumber(lhs, t), t);
   rhs = Rangify(ToNumber(rhs, t), t);
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
   if (lhs->IsRange() && rhs->IsRange()) {
-    return JSMultiplyRanger(lhs->AsRange(), rhs->AsRange(), t);
+    double results[4];
+    double lmin = lhs->AsRange()->Min();
+    double lmax = lhs->AsRange()->Max();
+    double rmin = rhs->AsRange()->Min();
+    double rmax = rhs->AsRange()->Max();
+    results[0] = lmin * rmin;
+    results[1] = lmin * rmax;
+    results[2] = lmax * rmin;
+    results[3] = lmax * rmax;
+    // If the result may be nan, we give up on calculating a precise type,
+    // because the discontinuity makes it too complicated. Note that even if
+    // none of the "results" above is nan, the actual result may still be, so
+    // we have to do a different check:
+    bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
+                      (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+                     (rhs->Maybe(t->cache_.kSingletonZero) &&
+                      (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+    if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN;  // Giving up.
+    bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
+                           (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
+    Type* range =
+        Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
+    return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
+                           : range;
   }
   return Type::Number();
 }
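
The inlined ranger above is ordinary interval arithmetic: the product of two ranges is bounded by the four corner products. For example, [-3, 2] * [4, 5] has corners {-12, -15, 8, 10}, so the result range is [-15, 10]. The NaN escape hatch exists because 0 * infinity is NaN, and the minus-zero union because a zero endpoint times a negative value yields -0. A sketch of just the corner computation (this Range is a stand-in, and it ignores those two refinements):

#include <algorithm>

struct Range { double min, max; };

// Corner-product bound for interval multiplication; the real JSMultiplyTyper
// layers the NaN and minus-zero handling on top of this.
Range MultiplyRanges(const Range& l, const Range& r) {
  double c[4] = {l.min * r.min, l.min * r.max, l.max * r.min, l.max * r.max};
  return {*std::min_element(c, c + 4), *std::max_element(c, c + 4)};
}
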
@@ -1090,9 +1105,8 @@
   return maybe_nan ? Type::Number() : Type::OrderedNumber();
 }
 
-
-Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
-                                      Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
+                                      Typer* t) {
   double lmin = lhs->Min();
   double lmax = lhs->Max();
   double rmin = rhs->Min();
@@ -1286,8 +1300,8 @@
     } else if (receiver->IsClass() &&
                receiver->AsClass()->Map()->IsJSFunctionMap()) {
       Handle<Map> map = receiver->AsClass()->Map();
-      return map->has_non_instance_prototype() ? Type::Primitive(zone())
-                                               : Type::Receiver(zone());
+      return map->has_non_instance_prototype() ? Type::Primitive()
+                                               : Type::Receiver();
     }
   }
   return Type::Any();
@@ -1335,8 +1349,8 @@
     // Only weaken if there is range involved; we should converge quickly
     // for all other types (the exception is a union of many constants,
     // but we currently do not increase the number of constants in unions).
-    Type::RangeType* previous = previous_integer->GetRange();
-    Type::RangeType* current = current_integer->GetRange();
+    Type* previous = previous_integer->GetRange();
+    Type* current = current_integer->GetRange();
     if (current == nullptr || previous == nullptr) {
       return current_type;
     }
@@ -1397,19 +1411,12 @@
 
 
 Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeJSHasProperty(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
 
 // JS context operators.
 
@@ -1430,9 +1437,6 @@
 }
 
 
-Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
-
-
 Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
   Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
   if (outer->Is(Type::None())) {
@@ -1525,8 +1529,14 @@
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
         // String functions.
+        case kStringCharCodeAt:
+          return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
+                             t->zone());
         case kStringCharAt:
+        case kStringConcat:
         case kStringFromCharCode:
+        case kStringToLowerCase:
+        case kStringToUpperCase:
           return Type::String();
         // Array functions.
         case kArrayIndexOf:
@@ -1550,15 +1560,15 @@
 
 Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
   switch (CallRuntimeParametersOf(node->op()).id()) {
+    case Runtime::kInlineIsJSReceiver:
+      return TypeUnaryOp(node, ObjectIsReceiver);
     case Runtime::kInlineIsSmi:
+      return TypeUnaryOp(node, ObjectIsSmi);
     case Runtime::kInlineIsArray:
     case Runtime::kInlineIsDate:
     case Runtime::kInlineIsTypedArray:
-    case Runtime::kInlineIsMinusZero:
-    case Runtime::kInlineIsFunction:
     case Runtime::kInlineIsRegExp:
-    case Runtime::kInlineIsJSReceiver:
-      return Type::Boolean(zone());
+      return Type::Boolean();
     case Runtime::kInlineDoubleLo:
     case Runtime::kInlineDoubleHi:
       return Type::Signed32();
@@ -1576,6 +1586,7 @@
     case Runtime::kInlineRegExpConstructResult:
       return Type::OtherObject();
     case Runtime::kInlineSubString:
+    case Runtime::kInlineStringCharFromCode:
       return Type::String();
     case Runtime::kInlineToInteger:
       return TypeUnaryOp(node, ToInteger);
@@ -1613,15 +1624,16 @@
 
 
 Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
-  // TODO(bmeurer): Return a tuple type here.
-  return Type::Any();
+  STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
+  Factory* const f = isolate()->factory();
+  Type* const cache_type = Type::Union(
+      typer_->cache_.kSmi, Type::Class(f->meta_map(), zone()), zone());
+  Type* const cache_array = Type::Class(f->fixed_array_map(), zone());
+  Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
+  return Type::Tuple(cache_type, cache_array, cache_length, zone());
 }
 
-
-Type* Typer::Visitor::TypeJSForInDone(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeJSForInDone(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeJSForInStep(Node* node) {
   STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
@@ -1643,82 +1655,57 @@
 
 // Simplified operators.
 
-
-Type* Typer::Visitor::TypeBooleanNot(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
   return TypeUnaryOp(node, ToNumber);
 }
 
+Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeNumberEqual(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
+Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(zone()); }
+Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
 
+Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
 
-Type* Typer::Visitor::TypeNumberSubtract(Node* node) {
-  return Type::Number(zone());
-}
+Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
 
-
-Type* Typer::Visitor::TypeNumberMultiply(Node* node) {
-  return Type::Number(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberDivide(Node* node) {
-  return Type::Number(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberModulus(Node* node) {
-  return Type::Number(zone());
-}
-
+Type* Typer::Visitor::TypeNumberModulus(Node* node) { return Type::Number(); }
 
 Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
-  return Type::Signed32(zone());
+  return Type::Signed32();
 }
 
 
 Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
-  return Type::Unsigned32(zone());
+  return Type::Unsigned32();
 }
 
 
@@ -1733,7 +1720,7 @@
 
 
 Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
 
@@ -1755,19 +1742,12 @@
   return TypeBinaryOp(node, ReferenceEqualTyper);
 }
 
+Type* Typer::Visitor::TypeStringEqual(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeStringEqual(Node* node) {
-  return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeStringLessThan(Node* node) {
-  return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeStringLessThan(Node* node) { return Type::Boolean(); }
 
 Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
-  return Type::Boolean(zone());
+  return Type::Boolean();
 }
 
 
@@ -1931,20 +1911,17 @@
 
 
 Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
-  Type* arg = Operand(node, 0);
-  if (arg->Is(Type::None())) return Type::None();
-  if (arg->Is(Type::Number())) return typer_->singleton_true_;
-  if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
-  return Type::Boolean();
+  return TypeUnaryOp(node, ObjectIsNumber);
+}
+
+
+Type* Typer::Visitor::TypeObjectIsReceiver(Node* node) {
+  return TypeUnaryOp(node, ObjectIsReceiver);
 }
 
 
 Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
-  Type* arg = Operand(node, 0);
-  if (arg->Is(Type::None())) return Type::None();
-  if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
-  if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
-  return Type::Boolean();
+  return TypeUnaryOp(node, ObjectIsSmi);
 }
 
 
@@ -1952,6 +1929,7 @@
 
 Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
 
 Type* Typer::Visitor::TypeStore(Node* node) {
   UNREACHABLE();
@@ -1989,6 +1967,11 @@
 Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
 
 
+Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
+  return Type::Integral32();
+}
+
+
 Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
   return Type::Integral32();
 }
@@ -2021,6 +2004,11 @@
 Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
 
 
+Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
+  return Type::Internal();
+}
+
+
 Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
 
 
@@ -2145,6 +2133,17 @@
 }
 
 
+Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
+  return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeTruncateFloat32ToUint32(Node* node) {
+  return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+                         zone());
+}
+
+
 Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
   return Type::Internal();
 }
@@ -2200,6 +2199,11 @@
 }
 
 
+Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
+  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
 Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
   return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
 }
@@ -2210,6 +2214,11 @@
 }
 
 
+Type* Typer::Visitor::TypeRoundUint32ToFloat32(Node* node) {
+  return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
 Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
   return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
 }
@@ -2406,6 +2415,9 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
+  return Type::Internal();
+}
 
 Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
 
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 4177026..0982b28 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -30,7 +30,7 @@
 
   Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
         CompilationDependencies* dependencies = nullptr,
-        Type::FunctionType* function_type = nullptr);
+        FunctionType* function_type = nullptr);
   ~Typer();
 
   void Run();
@@ -46,13 +46,13 @@
   Isolate* isolate() const { return isolate_; }
   Flags flags() const { return flags_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
-  Type::FunctionType* function_type() const { return function_type_; }
+  FunctionType* function_type() const { return function_type_; }
 
   Isolate* const isolate_;
   Graph* const graph_;
   Flags const flags_;
   CompilationDependencies* const dependencies_;
-  Type::FunctionType* function_type_;
+  FunctionType* function_type_;
   Decorator* decorator_;
   TypeCache const& cache_;
 
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 1a3ef8e..99480ca 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -22,7 +22,6 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/ostreams.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -428,13 +427,20 @@
       }
       break;
     }
-    case IrOpcode::kFrameState:
+    case IrOpcode::kFrameState: {
       // TODO(jarin): what are the constraints on these?
       CHECK_EQ(5, value_count);
       CHECK_EQ(0, control_count);
       CHECK_EQ(0, effect_count);
       CHECK_EQ(6, input_count);
+      for (int i = 0; i < 3; ++i) {
+        CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
+                  IrOpcode::kStateValues ||
+              NodeProperties::GetValueInput(node, i)->opcode() ==
+                  IrOpcode::kTypedStateValues);
+      }
       break;
+    }
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedStateValues:
@@ -553,7 +559,6 @@
       break;
 
     case IrOpcode::kJSLoadContext:
-    case IrOpcode::kJSLoadDynamic:
       // Type can be anything.
       CheckUpperIs(node, Type::Any());
       break;
@@ -707,6 +712,7 @@
       break;
     }
     case IrOpcode::kObjectIsNumber:
+    case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
       CheckValueInputIs(node, 0, Type::Any());
       CheckUpperIs(node, Type::Boolean());
@@ -824,6 +830,7 @@
     // -----------------------
     case IrOpcode::kLoad:
     case IrOpcode::kStore:
+    case IrOpcode::kStackSlot:
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Or:
     case IrOpcode::kWord32Xor:
@@ -834,6 +841,7 @@
     case IrOpcode::kWord32Equal:
     case IrOpcode::kWord32Clz:
     case IrOpcode::kWord32Ctz:
+    case IrOpcode::kWord32ReverseBits:
     case IrOpcode::kWord32Popcnt:
     case IrOpcode::kWord64And:
     case IrOpcode::kWord64Or:
@@ -845,6 +853,7 @@
     case IrOpcode::kWord64Clz:
     case IrOpcode::kWord64Popcnt:
     case IrOpcode::kWord64Ctz:
+    case IrOpcode::kWord64ReverseBits:
     case IrOpcode::kWord64Equal:
     case IrOpcode::kInt32Add:
     case IrOpcode::kInt32AddWithOverflow:
@@ -907,8 +916,10 @@
     case IrOpcode::kFloat64LessThan:
     case IrOpcode::kFloat64LessThanOrEqual:
     case IrOpcode::kTruncateInt64ToInt32:
+    case IrOpcode::kRoundInt32ToFloat32:
     case IrOpcode::kRoundInt64ToFloat32:
     case IrOpcode::kRoundInt64ToFloat64:
+    case IrOpcode::kRoundUint32ToFloat32:
     case IrOpcode::kRoundUint64ToFloat64:
     case IrOpcode::kRoundUint64ToFloat32:
     case IrOpcode::kTruncateFloat64ToFloat32:
@@ -924,6 +935,8 @@
     case IrOpcode::kChangeFloat32ToFloat64:
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
+    case IrOpcode::kTruncateFloat32ToInt32:
+    case IrOpcode::kTruncateFloat32ToUint32:
     case IrOpcode::kTryTruncateFloat32ToInt64:
     case IrOpcode::kTryTruncateFloat64ToInt64:
     case IrOpcode::kTryTruncateFloat32ToUint64:
@@ -934,6 +947,7 @@
     case IrOpcode::kFloat64InsertHighWord32:
     case IrOpcode::kLoadStackPointer:
     case IrOpcode::kLoadFramePointer:
+    case IrOpcode::kLoadParentFramePointer:
     case IrOpcode::kCheckedLoad:
     case IrOpcode::kCheckedStore:
       // TODO(rossberg): Check.
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 17065d6..9c3858d 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -15,6 +15,7 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/int64-lowering.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-operator.h"
@@ -29,6 +30,9 @@
 
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
+#include "src/factory.h"
+#include "src/log-inl.h"
+#include "src/profiler/cpu-profiler.h"
 
 #include "src/wasm/ast-decoder.h"
 #include "src/wasm/wasm-module.h"
@@ -105,6 +109,9 @@
   // Make the current control path trap to unreachable.
   void Unreachable() { ConnectTrap(kTrapUnreachable); }
 
+  // Always trap with the given reason.
+  void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+
   // Add a check that traps if {node} is equal to {val}.
   Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
     Int32Matcher m(node);
@@ -165,6 +172,28 @@
     *effect_ptr = before;
   }
 
+  Node* GetTrapValue(wasm::FunctionSig* sig) {
+    if (sig->return_count() > 0) {
+      switch (sig->GetReturn()) {
+        case wasm::kAstI32:
+          return jsgraph()->Int32Constant(0xdeadbeef);
+        case wasm::kAstI64:
+          return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+        case wasm::kAstF32:
+          return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+        case wasm::kAstF64:
+          return jsgraph()->Float64Constant(
+              bit_cast<double>(0xdeadbeefdeadbeef));
+        default:
+          UNREACHABLE();
+          return nullptr;
+      }
+    } else {
+      return jsgraph()->Int32Constant(0xdeadbeef);
+    }
+  }
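
GetTrapValue stamps a recognizable 0xdeadbeef pattern into whatever return type the signature demands, reinterpreting the bits for the float cases so the garbage value is identifiable in dumps. A small illustration of that reinterpretation, assuming C++20's std::bit_cast as the portable equivalent of V8's bit_cast:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  // Same trick as GetTrapValue: stamp the trap pattern into the float bits.
  float f32 = std::bit_cast<float>(uint32_t{0xdeadbeef});
  double f64 = std::bit_cast<double>(uint64_t{0xdeadbeefdeadbeef});
  std::printf("%a %a\n", f32, f64);  // hex-float view of the sentinel values
  return 0;
}
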
+
  private:
   WasmGraphBuilder* builder_;
   JSGraph* jsgraph_;
@@ -197,7 +226,7 @@
     *effect_ptr = effects_[reason] =
         graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
 
-    if (module && !module->context.is_null()) {
+    if (module && !module->instance->context.is_null()) {
       // Use the module context to call the runtime to throw an exception.
       Runtime::FunctionId f = Runtime::kThrow;
       const Runtime::Function* fun = Runtime::FunctionForId(f);
@@ -210,7 +239,7 @@
           jsgraph()->ExternalConstant(
               ExternalReference(f, jsgraph()->isolate())),  // ref
           jsgraph()->Int32Constant(fun->nargs),             // arity
-          jsgraph()->Constant(module->context),             // context
+          jsgraph()->Constant(module->instance->context),   // context
           *effect_ptr,
           *control_ptr};
 
@@ -227,29 +256,7 @@
       end = thrw;
     } else {
       // End the control flow with returning 0xdeadbeef
-      Node* ret_value;
-      if (builder_->GetFunctionSignature()->return_count() > 0) {
-        switch (builder_->GetFunctionSignature()->GetReturn()) {
-          case wasm::kAstI32:
-            ret_value = jsgraph()->Int32Constant(0xdeadbeef);
-            break;
-          case wasm::kAstI64:
-            ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
-            break;
-          case wasm::kAstF32:
-            ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
-            break;
-          case wasm::kAstF64:
-            ret_value = jsgraph()->Float64Constant(
-                bit_cast<double>(0xdeadbeefdeadbeef));
-            break;
-          default:
-            UNREACHABLE();
-            ret_value = nullptr;
-        }
-      } else {
-        ret_value = jsgraph()->Int32Constant(0xdeadbeef);
-      }
+      Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
       end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
                              *effect_ptr, *control_ptr);
     }
@@ -475,6 +482,9 @@
       op = m->Uint32LessThanOrEqual();
       std::swap(left, right);
       break;
+    case wasm::kExprI64And:
+      op = m->Word64And();
+      break;
 #if WASM_64
     // Opcodes only supported on 64-bit platforms.
     // TODO(titzer): query the machine operator builder here instead of #ifdef.
@@ -525,9 +535,6 @@
       op = m->Uint64Mod();
       return graph()->NewNode(op, left, right,
                               trap_->ZeroCheck64(kTrapRemByZero, right));
-    case wasm::kExprI64And:
-      op = m->Word64And();
-      break;
     case wasm::kExprI64Ior:
       op = m->Word64Or();
       break;
@@ -696,14 +703,10 @@
       op = m->ChangeUint32ToFloat64();
       break;
     case wasm::kExprF32SConvertI32:
-      op = m->ChangeInt32ToFloat64();  // TODO(titzer): two conversions
-      input = graph()->NewNode(op, input);
-      op = m->TruncateFloat64ToFloat32();
+      op = m->RoundInt32ToFloat32();
       break;
     case wasm::kExprF32UConvertI32:
-      op = m->ChangeUint32ToFloat64();
-      input = graph()->NewNode(op, input);
-      op = m->TruncateFloat64ToFloat32();
+      op = m->RoundUint32ToFloat32();
       break;
     case wasm::kExprI32SConvertF32:
       return BuildI32SConvertF32(input);
@@ -725,6 +728,10 @@
       if (m->Word32Ctz().IsSupported()) {
         op = m->Word32Ctz().op();
         break;
+      } else if (m->Word32ReverseBits().IsSupported()) {
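+        // No ctz instruction, but bit reversal is available: fall back on the
+        // identity ctz(x) == clz(reverse_bits(x)), e.g.
+        // ctz(0x00000008) == clz(0x10000000) == 3.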
+        Node* reversed = graph()->NewNode(m->Word32ReverseBits().op(), input);
+        Node* result = graph()->NewNode(m->Word32Clz(), reversed);
+        return result;
       } else {
         return BuildI32Ctz(input);
       }
@@ -738,84 +745,53 @@
       }
     }
     case wasm::kExprF32Floor: {
-      if (m->Float32RoundDown().IsSupported()) {
-        op = m->Float32RoundDown().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundDown().IsSupported()) return BuildF32Floor(input);
+      op = m->Float32RoundDown().op();
+      break;
     }
     case wasm::kExprF32Ceil: {
-      if (m->Float32RoundUp().IsSupported()) {
-        op = m->Float32RoundUp().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundUp().IsSupported()) return BuildF32Ceil(input);
+      op = m->Float32RoundUp().op();
+      break;
     }
     case wasm::kExprF32Trunc: {
-      if (m->Float32RoundTruncate().IsSupported()) {
-        op = m->Float32RoundTruncate().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundTruncate().IsSupported()) return BuildF32Trunc(input);
+      op = m->Float32RoundTruncate().op();
+      break;
     }
     case wasm::kExprF32NearestInt: {
-      if (m->Float32RoundTiesEven().IsSupported()) {
-        op = m->Float32RoundTiesEven().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float32RoundTiesEven().IsSupported())
+        return BuildF32NearestInt(input);
+      op = m->Float32RoundTiesEven().op();
+      break;
     }
     case wasm::kExprF64Floor: {
-      if (m->Float64RoundDown().IsSupported()) {
-        op = m->Float64RoundDown().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundDown().IsSupported()) return BuildF64Floor(input);
+      op = m->Float64RoundDown().op();
+      break;
     }
     case wasm::kExprF64Ceil: {
-      if (m->Float64RoundUp().IsSupported()) {
-        op = m->Float64RoundUp().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundUp().IsSupported()) return BuildF64Ceil(input);
+      op = m->Float64RoundUp().op();
+      break;
     }
     case wasm::kExprF64Trunc: {
-      if (m->Float64RoundTruncate().IsSupported()) {
-        op = m->Float64RoundTruncate().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundTruncate().IsSupported()) return BuildF64Trunc(input);
+      op = m->Float64RoundTruncate().op();
+      break;
     }
     case wasm::kExprF64NearestInt: {
-      if (m->Float64RoundTiesEven().IsSupported()) {
-        op = m->Float64RoundTiesEven().op();
-        break;
-      } else {
-        op = UnsupportedOpcode(opcode);
-        break;
-      }
+      if (!m->Float64RoundTiesEven().IsSupported())
+        return BuildF64NearestInt(input);
+      op = m->Float64RoundTiesEven().op();
+      break;
     }
-
-#if WASM_64
-    // Opcodes only supported on 64-bit platforms.
-    // TODO(titzer): query the machine operator builder here instead of #ifdef.
     case wasm::kExprI32ConvertI64:
       op = m->TruncateInt64ToInt32();
       break;
+#if WASM_64
+    // Opcodes only supported on 64-bit platforms.
+    // TODO(titzer): query the machine operator builder here instead of #ifdef.
     case wasm::kExprI64SConvertI32:
       op = m->ChangeInt32ToInt64();
       break;
@@ -883,6 +859,10 @@
       if (m->Word64Ctz().IsSupported()) {
         op = m->Word64Ctz().op();
         break;
+      } else if (m->Word64ReverseBits().IsSupported()) {
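+        // Same ctz(x) == clz(reverse_bits(x)) fallback as in the 32-bit case.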
+        Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
+        Node* result = graph()->NewNode(m->Word64Clz(), reversed);
+        return result;
       } else {
         return BuildI64Ctz(input);
       }
@@ -1061,8 +1041,12 @@
 
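+  // The multiplications by 1.0 below (here and in the Max/F64 variants that
+  // follow) canonicalize a NaN operand before it is returned, presumably to
+  // quiet signalling NaNs; they leave every non-NaN value unchanged.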
   return left_le_right.Phi(
       wasm::kAstF32, left,
-      right_lt_left.Phi(wasm::kAstF32, right,
-                        left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+      right_lt_left.Phi(
+          wasm::kAstF32, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF32,
+              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
 
@@ -1078,8 +1062,12 @@
 
   return left_ge_right.Phi(
       wasm::kAstF32, left,
-      right_gt_left.Phi(wasm::kAstF32, right,
-                        left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+      right_gt_left.Phi(
+          wasm::kAstF32, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF32,
+              Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+              Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
 }
 
 
@@ -1095,8 +1083,12 @@
 
   return left_le_right.Phi(
       wasm::kAstF64, left,
-      right_lt_left.Phi(wasm::kAstF64, right,
-                        left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+      right_lt_left.Phi(
+          wasm::kAstF64, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF64,
+              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
 
@@ -1112,8 +1104,12 @@
 
   return left_ge_right.Phi(
       wasm::kAstF64, left,
-      right_gt_left.Phi(wasm::kAstF64, right,
-                        left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+      right_gt_left.Phi(
+          wasm::kAstF64, right,
+          left_is_not_nan.Phi(
+              wasm::kAstF64,
+              Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+              Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
 }
 
 
@@ -1121,14 +1117,12 @@
   MachineOperatorBuilder* m = jsgraph()->machine();
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
-  // TODO(titzer): two conversions
-  Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
 
-  // Convert the result back to f64. If we end up at a different value than the
+  // Convert the result back to f32. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
-  Node* check = Unop(wasm::kExprF64SConvertI32, result);
-  Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+  Node* check = Unop(wasm::kExprF32SConvertI32, result);
+  Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
   trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
 
   return result;
@@ -1137,6 +1131,10 @@
 
 Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
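+  // For asm.js, use ECMAScript ToInt32 semantics instead of trapping: NaN and
+  // the infinities map to 0, and out-of-range values wrap modulo 2^32, e.g.
+  //
+  //   ToInt32(NaN) == 0
+  //   ToInt32(2147483648.7) == -2147483648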
+  if (module_ && module_->asm_js) {
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1155,14 +1153,12 @@
   MachineOperatorBuilder* m = jsgraph()->machine();
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF32Trunc, input);
-  // TODO(titzer): two conversions
-  Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
-  Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+  Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
 
-  // Convert the result back to f64. If we end up at a different value than the
+  // Convert the result back to f32. If we end up at a different value than the
   // truncated input value, then there has been an overflow and we trap.
-  Node* check = Unop(wasm::kExprF64UConvertI32, result);
-  Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+  Node* check = Unop(wasm::kExprF32UConvertI32, result);
+  Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
   trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
 
   return result;
@@ -1171,6 +1167,10 @@
 
 Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
   MachineOperatorBuilder* m = jsgraph()->machine();
+  if (module_ && module_->asm_js) {
+    return graph()->NewNode(
+        m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+  }
   // Truncation of the input value is needed for the overflow check later.
   Node* trunc = Unop(wasm::kExprF64Trunc, input);
   Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
@@ -1360,6 +1360,117 @@
   return result;
 }
 
+Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
+  MachineType type = MachineType::Float32();
+  ExternalReference ref =
+      ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
+  MachineType type = MachineType::Float64();
+  ExternalReference ref =
+      ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
+  return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
+                                                 ExternalReference ref,
+                                                 MachineType type) {
+  // We implement these rounding operations by calling a C function that
+  // computes the result for us. The input is passed through a pointer rather
+  // than by value, to avoid floating-point parameters in the C call. For this
+  // we reserve a slot on the stack, store the parameter in that slot, pass a
+  // pointer to the slot to the C function, and after the call we read the
+  // result back from the stack slot.
+
+  Node* stack_slot_param =
+      graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
+
+  const Operator* store_op = jsgraph()->machine()->Store(
+      StoreRepresentation(type.representation(), kNoWriteBarrier));
+  *effect_ =
+      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       input, *effect_, *control_);
+
+  Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
+  sig_builder.AddParam(MachineType::Pointer());
+  Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+
+  Node* args[] = {function, stack_slot_param};
+
+  BuildCCall(sig_builder.Build(), args);
+
+  const Operator* load_op = jsgraph()->machine()->Load(type);
+
+  Node* load =
+      graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+                       *effect_, *control_);
+  *effect_ = load;
+  return load;
+}
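+
+// A minimal sketch of the C side of the rounding call above (hypothetical
+// name; the actual wrapper functions are registered as external references
+// elsewhere): the callee reads the operand through the pointer and writes
+// the rounded result back into the same stack slot.
+//
+//   static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }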
+
+Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
+  const size_t params = sig->parameter_count();
+  const size_t extra = 2;  // effect and control inputs.
+  const size_t count = 1 + params + extra;
+
+  // Reallocate the buffer to make space for extra inputs.
+  args = Realloc(args, count);
+
+  // Add effect and control inputs.
+  args[params + 1] = *effect_;
+  args[params + 2] = *control_;
+
+  CallDescriptor* desc =
+      Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
+
+  const Operator* op = jsgraph()->common()->Call(desc);
+  Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+  *effect_ = call;
+  return call;
+}
 
 Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
   const size_t params = sig->parameter_count();
@@ -1373,8 +1484,9 @@
   args[params + 1] = *effect_;
   args[params + 2] = *control_;
 
-  const Operator* op = jsgraph()->common()->Call(
-      module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+  CallDescriptor* descriptor =
+      wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+  const Operator* op = jsgraph()->common()->Call(descriptor);
   Node* call = graph()->NewNode(op, static_cast<int>(count), args);
 
   *effect_ = call;
@@ -1392,23 +1504,38 @@
   return BuildWasmCall(sig, args);
 }
 
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+  DCHECK_NULL(args[0]);
+
+  // Add code object as constant.
+  args[0] = Constant(module_->GetImportCode(index));
+  wasm::FunctionSig* sig = module_->GetImportSignature(index);
+
+  return BuildWasmCall(sig, args);
+}
 
 Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
   DCHECK_NOT_NULL(args[0]);
+  DCHECK(module_ && module_->instance);
 
   MachineOperatorBuilder* machine = jsgraph()->machine();
 
   // Compute the code object by loading it from the function table.
   Node* key = args[0];
-  Node* table = FunctionTable();
 
   // Bounds check the index.
   int table_size = static_cast<int>(module_->FunctionTableSize());
-  {
+  if (table_size > 0) {
+    // Bounds check against the table size.
     Node* size = Int32Constant(static_cast<int>(table_size));
     Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
     trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+  } else {
+    // No function table. Generate a trap and return a constant.
+    trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+    return trap_->GetTrapValue(module_->GetSignature(index));
   }
+  Node* table = FunctionTable();
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -1546,7 +1673,8 @@
   args[pos++] = *control_;
 
   // Call the WASM code.
-  CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
+  CallDescriptor* desc =
+      wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
   Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
   Node* jsval =
       ToJS(call, context,
@@ -1631,18 +1759,23 @@
 
 
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+  DCHECK(module_ && module_->instance);
   if (offset == 0) {
-    if (!mem_buffer_)
-      mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+    if (!mem_buffer_) {
+      mem_buffer_ = jsgraph()->IntPtrConstant(
+          reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+    }
     return mem_buffer_;
   } else {
-    return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+    return jsgraph()->IntPtrConstant(
+        reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
   }
 }
 
 
 Node* WasmGraphBuilder::MemSize(uint32_t offset) {
-  int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+  DCHECK(module_ && module_->instance);
+  uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
   if (offset == 0) {
     if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
     return mem_size_;
@@ -1653,18 +1786,21 @@
 
 
 Node* WasmGraphBuilder::FunctionTable() {
+  DCHECK(module_ && module_->instance &&
+         !module_->instance->function_table.is_null());
   if (!function_table_) {
-    DCHECK(!module_->function_table.is_null());
-    function_table_ = jsgraph()->Constant(module_->function_table);
+    function_table_ = jsgraph()->Constant(module_->instance->function_table);
   }
   return function_table_;
 }
 
 
 Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
-      module_->globals_area + module_->module->globals->at(index).offset);
+      reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+                                  module_->module->globals->at(index).offset));
   const Operator* op = jsgraph()->machine()->Load(mem_type);
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
                                 *control_);
@@ -1674,9 +1810,11 @@
 
 
 Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+  DCHECK(module_ && module_->instance && module_->instance->globals_start);
   MachineType mem_type = module_->GetGlobalType(index);
   Node* addr = jsgraph()->IntPtrConstant(
-      module_->globals_area + module_->module->globals->at(index).offset);
+      reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+                                  module_->module->globals->at(index).offset));
   const Operator* op = jsgraph()->machine()->Store(
       StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
   Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1689,12 +1827,11 @@
 void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
                                       uint32_t offset) {
   // TODO(turbofan): fold bounds checks for constant indexes.
-  CHECK_GE(module_->mem_end, module_->mem_start);
-  ptrdiff_t size = module_->mem_end - module_->mem_start;
+  DCHECK(module_ && module_->instance);
+  size_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
   Node* cond;
-  if (static_cast<ptrdiff_t>(offset) >= size ||
-      static_cast<ptrdiff_t>(offset + memsize) > size) {
+  if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
     // The access will always throw.
     cond = jsgraph()->Int32Constant(0);
   } else {
@@ -1782,6 +1919,35 @@
 
 Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
 
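+// On 32-bit targets (kPointerSize == 4) this replaces every int64 node in
+// the graph with a pair of int32 nodes; see GetI32WasmCallDescriptor for the
+// matching call-descriptor lowering.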
+void WasmGraphBuilder::Int64LoweringForTesting() {
+  if (kPointerSize == 4) {
+    Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
+                    jsgraph()->common(), jsgraph()->zone(),
+                    function_signature_);
+    r.LowerGraph();
+  }
+}
+
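+// Emits a code-creation event so the profiler and the code-event log can
+// attribute the generated code. Entries are named "<message>#<index>:<name>",
+// e.g. "WASM_function#5:add" for WASM function 5 named "add".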
+static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                      CompilationInfo* info,
+                                      const char* message, uint32_t index,
+                                      const char* func_name) {
+  Isolate* isolate = info->isolate();
+  if (isolate->logger()->is_logging_code_events() ||
+      isolate->cpu_profiler()->is_profiling()) {
+    ScopedVector<char> buffer(128);
+    SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+    Handle<String> name_str =
+        isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+    Handle<String> script_str =
+        isolate->factory()->NewStringFromAsciiChecked("(WASM)");
+    Handle<Code> code = info->code();
+    Handle<SharedFunctionInfo> shared =
+        isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
+    PROFILE(isolate,
+            CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+  }
+}
 
 Handle<JSFunction> CompileJSToWasmWrapper(
     Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
@@ -1849,38 +2015,42 @@
         module->GetFunctionSignature(index)->parameter_count());
     CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
         &zone, false, params + 1, CallDescriptor::kNoFlags);
-    CompilationInfo info("js-to-wasm", isolate, &zone);
     // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    info.set_output_code_kind(Code::WASM_FUNCTION);
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    bool debugging =
+#if DEBUG
+        true;
+#else
+        FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+    const char* func_name = "js-to-wasm";
+
+    static unsigned id = 0;
+    Vector<char> buffer;
+    if (debugging) {
+      buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "js-to-wasm#%d", id);
+      func_name = buffer.start();
+    }
+
+    CompilationInfo info(func_name, isolate, &zone, flags);
     Handle<Code> code =
         Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
-
-#ifdef ENABLE_DISASSEMBLER
-    // Disassemble the wrapper code for debugging.
-    if (!code.is_null() && FLAG_print_opt_code) {
-      Vector<char> buffer;
-      const char* name = "";
-      if (func->name_offset > 0) {
-        const byte* ptr = module->module->module_start + func->name_offset;
-        name = reinterpret_cast<const char*>(ptr);
-      }
-      SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
-      OFStream os(stdout);
-      code->Disassemble(buffer.start(), os);
+    if (debugging) {
+      buffer.Dispose();
     }
-#endif
+
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+                              module->module->GetName(func->name_offset));
     // Set the JSFunction's machine code.
     function->set_code(*code);
   }
   return function;
 }
 
-
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    uint32_t index) {
-  wasm::WasmFunction* func = &module->module->functions->at(index);
-
+                                    wasm::FunctionSig* sig, const char* name) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
@@ -1894,11 +2064,11 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+  WasmGraphBuilder builder(&zone, &jsgraph, sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.set_module(module);
-  builder.BuildWasmToJSWrapper(function, func->sig);
+  builder.BuildWasmToJSWrapper(function, sig);
 
   Handle<Code> code = Handle<Code>::null();
   {
@@ -1923,26 +2093,33 @@
     }
 
     // Schedule and compile to machine code.
-    CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
-    CompilationInfo info("wasm-to-js", isolate, &zone);
+    CallDescriptor* incoming =
+        wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
     // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
-    info.set_output_code_kind(Code::WASM_FUNCTION);
-    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
-
-#ifdef ENABLE_DISASSEMBLER
-    // Disassemble the wrapper code for debugging.
-    if (!code.is_null() && FLAG_print_opt_code) {
-      Vector<char> buffer;
-      const char* name = "";
-      if (func->name_offset > 0) {
-        const byte* ptr = module->module->module_start + func->name_offset;
-        name = reinterpret_cast<const char*>(ptr);
-      }
-      SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
-      OFStream os(stdout);
-      code->Disassemble(buffer.start(), os);
-    }
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+    bool debugging =
+#if DEBUG
+        true;
+#else
+        FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
 #endif
+    const char* func_name = "wasm-to-js";
+    static unsigned id = 0;
+    Vector<char> buffer;
+    if (debugging) {
+      buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "wasm-to-js#%d", id);
+      func_name = buffer.start();
+    }
+
+    CompilationInfo info(func_name, isolate, &zone, flags);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+    if (debugging) {
+      buffer.Dispose();
+    }
+
+    RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
+                              name);
   }
   return code;
 }
@@ -1951,25 +2128,21 @@
 // Helper function to compile a single function.
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function,
-                                 int index) {
+                                 const wasm::WasmFunction& function) {
   if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
-    // TODO(titzer): clean me up a bit.
     OFStream os(stdout);
-    os << "Compiling WASM function #" << index << ":";
-    if (function.name_offset > 0) {
-      os << module_env->module->GetName(function.name_offset);
-    }
+    os << "Compiling WASM function "
+       << wasm::WasmFunctionName(&function, module_env);
     os << std::endl;
   }
   // Initialize the function environment for decoding.
   wasm::FunctionEnv env;
   env.module = module_env;
   env.sig = function.sig;
-  env.local_int32_count = function.local_int32_count;
-  env.local_int64_count = function.local_int64_count;
-  env.local_float32_count = function.local_float32_count;
-  env.local_float64_count = function.local_float64_count;
+  env.local_i32_count = function.local_i32_count;
+  env.local_i64_count = function.local_i64_count;
+  env.local_f32_count = function.local_f32_count;
+  env.local_f64_count = function.local_f64_count;
   env.SumLocals();
 
   // Create a TF graph during decoding.
@@ -1993,35 +2166,49 @@
       os << "Compilation failed: " << result << std::endl;
     }
     // Add the function as another context for the exception
-    Vector<char> buffer;
-    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+    ScopedVector<char> buffer(128);
+    SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
+             function.func_index,
              module_env->module->GetName(function.name_offset));
     thrower.Failed(buffer.start(), result);
     return Handle<Code>::null();
   }
 
   // Run the compiler pipeline to generate machine code.
-  CallDescriptor* descriptor = const_cast<CallDescriptor*>(
-      module_env->GetWasmCallDescriptor(&zone, function.sig));
-  CompilationInfo info("wasm", isolate, &zone);
-  info.set_output_code_kind(Code::WASM_FUNCTION);
+  CallDescriptor* descriptor =
+      wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
+  if (kPointerSize == 4) {
+    descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
+  }
+  Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+  // Add flags here if a meaningful name is helpful for debugging.
+  bool debugging =
+#if DEBUG
+      true;
+#else
+      FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+  const char* func_name = "wasm";
+  Vector<char> buffer;
+  if (debugging) {
+    buffer = Vector<char>::New(128);
+    SNPrintF(buffer, "WASM_function#%d:%s", function.func_index,
+             module_env->module->GetName(function.name_offset));
+    func_name = buffer.start();
+  }
+  CompilationInfo info(func_name, isolate, &zone, flags);
+
   Handle<Code> code =
       Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
-
-#ifdef ENABLE_DISASSEMBLER
-  // Disassemble the code for debugging.
-  if (!code.is_null() && FLAG_print_opt_code) {
-    Vector<char> buffer;
-    const char* name = "";
-    if (function.name_offset > 0) {
-      const byte* ptr = module_env->module->module_start + function.name_offset;
-      name = reinterpret_cast<const char*>(ptr);
-    }
-    SNPrintF(buffer, "WASM function #%d:%s", index, name);
-    OFStream os(stdout);
-    code->Disassemble(buffer.start(), os);
+  if (debugging) {
+    buffer.Dispose();
   }
-#endif
+  if (!code.is_null()) {
+    RecordFunctionCompilation(
+        Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
+        module_env->module->GetName(function.name_offset));
+  }
+
   return code;
 }
 
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 1a17a83..2e86b56 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -35,12 +35,12 @@
 // Compiles a single function, producing a code object.
 Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
                                  wasm::ModuleEnv* module_env,
-                                 const wasm::WasmFunction& function, int index);
+                                 const wasm::WasmFunction& function);
 
 // Wraps a JS function, producing a code object that can be called from WASM.
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
                                     Handle<JSFunction> function,
-                                    uint32_t index);
+                                    wasm::FunctionSig* sig, const char* name);
 
 // Wraps a given wasm code object, producing a JSFunction that can be called
 // from JavaScript.
@@ -100,6 +100,7 @@
   Node* Unreachable();
 
   Node* CallDirect(uint32_t index, Node** args);
+  Node* CallImport(uint32_t index, Node** args);
   Node* CallIndirect(uint32_t index, Node** args);
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSFunction> function,
@@ -132,6 +133,8 @@
 
   wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
 
+  void Int64LoweringForTesting();
+
  private:
   static const int kDefaultBufferSize = 16;
   friend class WasmTrapHelper;
@@ -159,6 +162,7 @@
   Node* MemBuffer(uint32_t offset);
   void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
 
+  Node* BuildCCall(MachineSignature* sig, Node** args);
   Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
   Node* BuildF32Neg(Node* input);
   Node* BuildF64Neg(Node* input);
@@ -176,6 +180,16 @@
   Node* BuildI32Popcnt(Node* input);
   Node* BuildI64Ctz(Node* input);
   Node* BuildI64Popcnt(Node* input);
+  Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
+                                 MachineType type);
+  Node* BuildF32Trunc(Node* input);
+  Node* BuildF32Floor(Node* input);
+  Node* BuildF32Ceil(Node* input);
+  Node* BuildF32NearestInt(Node* input);
+  Node* BuildF64Trunc(Node* input);
+  Node* BuildF64Floor(Node* input);
+  Node* BuildF64Ceil(Node* input);
+  Node* BuildF64NearestInt(Node* input);
 
   Node** Realloc(Node** buffer, size_t count) {
     Node** buf = Buffer(count);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 92363dd..3176fd3 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
 // ===========================================================================
 // == ia32 ===================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
 #define FP_RETURN_REGISTERS xmm1, xmm2
@@ -76,7 +76,7 @@
 // ===========================================================================
 // == x87 ====================================================================
 // ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
 #define GP_RETURN_REGISTERS eax, edx
 #define FP_RETURN_REGISTERS stX_0
 
@@ -191,15 +191,7 @@
 };
 }  // namespace
 
-
-// General code uses the above configuration data.
-CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
-                                                 FunctionSig* fsig) {
-  MachineSignature::Builder msig(zone, fsig->return_count(),
-                                 fsig->parameter_count());
-  LocationSignature::Builder locations(zone, fsig->return_count(),
-                                       fsig->parameter_count());
-
+static Allocator GetReturnRegisters() {
 #ifdef GP_RETURN_REGISTERS
   static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
   static const int kGPReturnRegistersCount =
@@ -221,14 +213,10 @@
   Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
                  kFPReturnRegisters, kFPReturnRegistersCount);
 
-  // Add return location(s).
-  const int return_count = static_cast<int>(locations.return_count_);
-  for (int i = 0; i < return_count; i++) {
-    LocalType ret = fsig->GetReturn(i);
-    msig.AddReturn(MachineTypeFor(ret));
-    locations.AddReturn(rets.Next(ret));
-  }
+  return rets;
+}
 
+static Allocator GetParameterRegisters() {
 #ifdef GP_PARAM_REGISTERS
   static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
   static const int kGPParamRegistersCount =
@@ -250,6 +238,29 @@
   Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
                    kFPParamRegistersCount);
 
+  return params;
+}
+
+// General code uses the above configuration data.
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+                                                 FunctionSig* fsig) {
+  MachineSignature::Builder msig(zone, fsig->return_count(),
+                                 fsig->parameter_count());
+  LocationSignature::Builder locations(zone, fsig->return_count(),
+                                       fsig->parameter_count());
+
+  Allocator rets = GetReturnRegisters();
+
+  // Add return location(s).
+  const int return_count = static_cast<int>(locations.return_count_);
+  for (int i = 0; i < return_count; i++) {
+    LocalType ret = fsig->GetReturn(i);
+    msig.AddReturn(MachineTypeFor(ret));
+    locations.AddReturn(rets.Next(ret));
+  }
+
+  Allocator params = GetParameterRegisters();
+
   // Add register and/or stack parameter(s).
   const int parameter_count = static_cast<int>(fsig->parameter_count());
   for (int i = 0; i < parameter_count; i++) {
@@ -264,6 +275,7 @@
   // The target for WASM calls is always a code object.
   MachineType target_type = MachineType::AnyTagged();
   LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+
   return new (zone) CallDescriptor(       // --
       CallDescriptor::kCallCodeObject,    // kind
       target_type,                        // target MachineType
@@ -275,8 +287,82 @@
       kCalleeSaveRegisters,               // callee-saved registers
       kCalleeSaveFPRegisters,             // callee-saved fp regs
       CallDescriptor::kUseNativeStack,    // flags
-      "c-call");
+      "wasm-call");
 }
+
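+// Illustrative effect of the descriptor lowering below on a 32-bit target:
+// a WASM signature such as
+//
+//   i64 func(i64, f32)
+//
+// becomes the machine signature
+//
+//   (i32, i32) func(i32, i32, f32)
+//
+// with every i64 value split into two word32 halves.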
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
+    Zone* zone, CallDescriptor* descriptor) {
+  const MachineSignature* signature = descriptor->GetMachineSignature();
+  size_t parameter_count = signature->parameter_count();
+  size_t return_count = signature->return_count();
+  for (size_t i = 0; i < signature->parameter_count(); i++) {
+    if (signature->GetParam(i) == MachineType::Int64()) {
+      // For each int64 input we get two int32 inputs.
+      parameter_count++;
+    }
+  }
+  for (size_t i = 0; i < signature->return_count(); i++) {
+    if (signature->GetReturn(i) == MachineType::Int64()) {
+      // For each int64 return we get two int32 returns.
+      return_count++;
+    }
+  }
+  if (parameter_count == signature->parameter_count() &&
+      return_count == signature->return_count()) {
+    // If there is no int64 parameter or return value, we can just return the
+    // original descriptor.
+    return descriptor;
+  }
+
+  MachineSignature::Builder msig(zone, return_count, parameter_count);
+  LocationSignature::Builder locations(zone, return_count, parameter_count);
+
+  Allocator rets = GetReturnRegisters();
+
+  for (size_t i = 0; i < signature->return_count(); i++) {
+    if (signature->GetReturn(i) == MachineType::Int64()) {
+      // For each int64 return we get two int32 returns.
+      msig.AddReturn(MachineType::Int32());
+      msig.AddReturn(MachineType::Int32());
+      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+      locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+    } else {
+      msig.AddReturn(signature->GetReturn(i));
+      locations.AddReturn(rets.Next(signature->GetReturn(i).representation()));
+    }
+  }
+
+  Allocator params = GetParameterRegisters();
+
+  for (size_t i = 0; i < signature->parameter_count(); i++) {
+    if (signature->GetParam(i) == MachineType::Int64()) {
+      // For each int64 input we get two int32 inputs.
+      msig.AddParam(MachineType::Int32());
+      msig.AddParam(MachineType::Int32());
+      locations.AddParam(params.Next(MachineRepresentation::kWord32));
+      locations.AddParam(params.Next(MachineRepresentation::kWord32));
+    } else {
+      msig.AddParam(signature->GetParam(i));
+      locations.AddParam(params.Next(signature->GetParam(i).representation()));
+    }
+  }
+
+  return new (zone) CallDescriptor(          // --
+      descriptor->kind(),                    // kind
+      descriptor->GetInputType(0),           // target MachineType
+      descriptor->GetInputLocation(0),       // target location
+      msig.Build(),                          // machine_sig
+      locations.Build(),                     // location_sig
+      params.stack_offset,                   // stack_parameter_count
+      descriptor->properties(),              // properties
+      descriptor->CalleeSavedRegisters(),    // callee-saved registers
+      descriptor->CalleeSavedFPRegisters(),  // callee-saved fp regs
+      descriptor->flags(),                   // flags
+      descriptor->debug_name());
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index be406fb..510c0c6 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -209,15 +209,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ leap(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -261,6 +262,32 @@
     }                                                          \
   } while (0)
 
+#define ASSEMBLE_COMPARE(asm_instr)                                   \
+  do {                                                                \
+    if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+      size_t index = 0;                                               \
+      Operand left = i.MemoryOperand(&index);                         \
+      if (HasImmediateInput(instr, index)) {                          \
+        __ asm_instr(left, i.InputImmediate(index));                  \
+      } else {                                                        \
+        __ asm_instr(left, i.InputRegister(index));                   \
+      }                                                               \
+    } else {                                                          \
+      if (HasImmediateInput(instr, 1)) {                              \
+        if (instr->InputAt(0)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputImmediate(1));      \
+        } else {                                                      \
+          __ asm_instr(i.InputOperand(0), i.InputImmediate(1));       \
+        }                                                             \
+      } else {                                                        \
+        if (instr->InputAt(1)->IsRegister()) {                        \
+          __ asm_instr(i.InputRegister(0), i.InputRegister(1));       \
+        } else {                                                      \
+          __ asm_instr(i.InputRegister(0), i.InputOperand(1));        \
+        }                                                             \
+      }                                                               \
+    }                                                                 \
+  } while (0)
 
 #define ASSEMBLE_MULT(asm_instr)                              \
   do {                                                        \
@@ -654,11 +681,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -712,6 +734,13 @@
     case kArchFramePointer:
       __ movq(i.OutputRegister(), rbp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ movq(i.OutputRegister(), Operand(rbp, 0));
+      } else {
+        __ movq(i.OutputRegister(), rbp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       auto result = i.OutputRegister();
       auto input = i.InputDoubleRegister(0);
@@ -740,6 +769,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = rsp;
+      } else {
+        base = rbp;
+      }
+      __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -759,16 +800,16 @@
       ASSEMBLE_BINOP(andq);
       break;
     case kX64Cmp32:
-      ASSEMBLE_BINOP(cmpl);
+      ASSEMBLE_COMPARE(cmpl);
       break;
     case kX64Cmp:
-      ASSEMBLE_BINOP(cmpq);
+      ASSEMBLE_COMPARE(cmpq);
       break;
     case kX64Test32:
-      ASSEMBLE_BINOP(testl);
+      ASSEMBLE_COMPARE(testl);
       break;
     case kX64Test:
-      ASSEMBLE_BINOP(testq);
+      ASSEMBLE_COMPARE(testq);
       break;
     case kX64Imul32:
       ASSEMBLE_MULT(imull);
@@ -947,6 +988,22 @@
       __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
       break;
     }
+    case kSSEFloat32ToInt32:
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+    case kSSEFloat32ToUint32: {
+      if (instr->InputAt(0)->IsDoubleRegister()) {
+        __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+      } else {
+        __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
+      break;
+    }
     case kSSEFloat64Cmp:
       ASSEMBLE_SSE_BINOP(Ucomisd);
       break;
@@ -1197,6 +1254,13 @@
         __ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
+    case kSSEInt32ToFloat32:
+      if (instr->InputAt(0)->IsRegister()) {
+        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
+      } else {
+        __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+      }
+      break;
     case kSSEInt64ToFloat32:
       if (instr->InputAt(0)->IsRegister()) {
         __ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -1237,6 +1301,14 @@
       }
       __ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
       break;
+    case kSSEUint32ToFloat32:
+      if (instr->InputAt(0)->IsRegister()) {
+        __ movl(kScratchRegister, i.InputRegister(0));
+      } else {
+        __ movl(kScratchRegister, i.InputOperand(0));
+      }
+      __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
+      break;
     case kSSEFloat64ExtractLowWord32:
       if (instr->InputAt(0)->IsDoubleStackSlot()) {
         __ movl(i.OutputRegister(), i.InputOperand(0));
@@ -1828,8 +1900,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -=
         static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
   }
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 8e8e765..6d5e77c 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -63,6 +63,8 @@
   V(SSEFloat32Max)                 \
   V(SSEFloat32Min)                 \
   V(SSEFloat32ToFloat64)           \
+  V(SSEFloat32ToInt32)             \
+  V(SSEFloat32ToUint32)            \
   V(SSEFloat32Round)               \
   V(SSEFloat64Cmp)                 \
   V(SSEFloat64Add)                 \
@@ -84,11 +86,13 @@
   V(SSEFloat32ToUint64)            \
   V(SSEFloat64ToUint64)            \
   V(SSEInt32ToFloat64)             \
+  V(SSEInt32ToFloat32)             \
   V(SSEInt64ToFloat32)             \
   V(SSEInt64ToFloat64)             \
   V(SSEUint64ToFloat32)            \
   V(SSEUint64ToFloat64)            \
   V(SSEUint32ToFloat64)            \
+  V(SSEUint32ToFloat32)            \
   V(SSEFloat64ExtractLowWord32)    \
   V(SSEFloat64ExtractHighWord32)   \
   V(SSEFloat64InsertLowWord32)     \
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index f8537c8..1f10b51 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -79,6 +79,8 @@
     case kSSEFloat64Max:
     case kSSEFloat64Min:
     case kSSEFloat64ToFloat32:
+    case kSSEFloat32ToInt32:
+    case kSSEFloat32ToUint32:
     case kSSEFloat64ToInt32:
     case kSSEFloat64ToUint32:
     case kSSEFloat64ToInt64:
@@ -86,11 +88,13 @@
     case kSSEFloat64ToUint64:
     case kSSEFloat32ToUint64:
     case kSSEInt32ToFloat64:
+    case kSSEInt32ToFloat32:
     case kSSEInt64ToFloat32:
     case kSSEInt64ToFloat64:
     case kSSEUint64ToFloat32:
     case kSSEUint64ToFloat64:
     case kSSEUint32ToFloat64:
+    case kSSEUint32ToFloat32:
     case kSSEFloat64ExtractLowWord32:
     case kSSEFloat64ExtractHighWord32:
     case kSSEFloat64InsertLowWord32:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index c47a42e..d3a2a8e 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -133,6 +133,7 @@
     case MachineRepresentation::kWord64:
       opcode = kX64Movq;
       break;
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -219,6 +220,7 @@
       case MachineRepresentation::kWord64:
         opcode = kX64Movq;
         break;
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -264,8 +266,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kTagged:
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -316,8 +319,9 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:
-    case MachineRepresentation::kTagged:
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -622,6 +626,12 @@
 }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -734,10 +744,11 @@
   if (selector->IsLive(left) && !selector->IsLive(right)) {
     std::swap(left, right);
   }
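+  // Listing rax as a temp marks it as clobbered for the register allocator:
+  // the underlying x64 instruction writes the rax:rdx register pair.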
+  InstructionOperand temps[] = {g.TempRegister(rax)};
   // TODO(turbofan): We use UseUniqueRegister here to improve register
   // allocation.
   selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
-                 g.UseUniqueRegister(right));
+                 g.UseUniqueRegister(right), arraysize(temps), temps);
 }
 
 
@@ -752,9 +763,10 @@
 
 void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, rdx),
-                 g.UseFixed(node->InputAt(0), rax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(rax)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 }  // namespace
@@ -857,6 +869,18 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1046,6 +1070,12 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1058,6 +1088,12 @@
 }
 
 
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister()};
@@ -1303,6 +1339,48 @@
 
 namespace {
 
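+// Emits a compare whose left operand is a (covered) load, folding the memory
+// access into the compare itself. Together with ASSEMBLE_COMPARE in the code
+// generator this produces e.g. "cmpl [rbx+0xc], eax" instead of first loading
+// [rbx+0xc] into a scratch register.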
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  X64OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation rep =
+      LoadRepresentationOf(input->op()).representation();
+  if (rep == MachineRepresentation::kWord64 ||
+      rep == MachineRepresentation::kTagged) {
+    return opcode == kX64Cmp || opcode == kX64Test;
+  } else if (rep == MachineRepresentation::kWord32) {
+    return opcode == kX64Cmp32 || opcode == kX64Test32;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1330,26 +1408,41 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
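
To summarize the rewritten selection logic: immediates are canonicalized onto the right-hand side (commuting the continuation when the compare is not commutative), and a load that the compare can cover is folded into a memory operand instead of being forced through a register. A minimal sketch of that decision order, with illustrative stand-ins for the selector API (the CanBeBetterLeftOperand heuristic is elided):

    #include <cstdio>
    #include <utility>

    // Illustrative stand-in for a compare input: either an immediate or a
    // load whose only use is this compare ("coverable").
    struct Input {
      bool is_immediate;
      bool is_coverable_load;
    };

    // Mirrors the decision order in VisitWordCompare; *commuted reports
    // whether the branch condition would have to be flipped.
    const char* SelectCompareForm(Input left, Input right, bool commutative,
                                  bool* commuted) {
      *commuted = false;
      // 1. Canonicalize: an immediate only on the left moves to the right.
      if (!right.is_immediate && left.is_immediate) {
        std::swap(left, right);
        *commuted = !commutative;
      }
      // 2. Immediate on the right: fold a covered load if possible.
      if (right.is_immediate) {
        return left.is_coverable_load ? "cmp [mem], imm" : "cmp op, imm";
      }
      // 3. No immediate: still prefer folding a covered load on the left.
      if (left.is_coverable_load) return "cmp [mem], reg";
      return "cmp reg, op";
    }

    int main() {
      bool commuted;
      Input load = {false, true}, imm = {true, false};
      std::printf("%s commuted=%d\n",
                  SelectCompareForm(imm, load, false, &commuted), commuted);
      // Prints "cmp [mem], imm commuted=1".
      return 0;
    }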
 
 // Shared routine for 64-bit word comparison operations.
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index a7b7246..1575570 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/frames.h"
 #include "src/x87/assembler-x87.h"
 #include "src/x87/frames-x87.h"
 #include "src/x87/macro-assembler-x87.h"
@@ -50,7 +51,7 @@
 
   Operand ToMaterializableOperand(int materializable_offset) {
     FrameOffset offset = frame_access_state()->GetFrameOffset(
-        Frame::FPOffsetToSlot(materializable_offset));
+        FPOffsetToFrameSlot(materializable_offset));
     return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
   }
 
@@ -245,15 +246,16 @@
     if (mode_ > RecordWriteMode::kValueIsPointer) {
       __ JumpIfSmi(value_, exit());
     }
-    if (mode_ > RecordWriteMode::kValueIsMap) {
-      __ CheckPageFlag(value_, scratch0_,
-                       MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                       exit());
-    }
+    __ CheckPageFlag(value_, scratch0_,
+                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
+                     exit());
+    RememberedSetAction const remembered_set_action =
+        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+                                             : OMIT_REMEMBERED_SET;
     SaveFPRegsMode const save_fp_mode =
         frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
     RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
-                         EMIT_REMEMBERED_SET, save_fp_mode);
+                         remembered_set_action, save_fp_mode);
     __ lea(scratch1_, operand_);
     __ CallStub(&stub);
   }
@@ -462,14 +464,6 @@
       frame_access_state()->ClearSPDelta();
       break;
     }
-    case kArchLazyBailout: {
-      EnsureSpaceForLazyDeopt();
-      RecordCallPosition(instr);
-      // Lazy Bailout entry, need to re-initialize FPU state.
-      __ fninit();
-      __ fld1();
-      break;
-    }
     case kArchPrepareCallCFunction: {
       // Frame alignment requires using FP-relative frame addressing.
       frame_access_state()->SetFrameAccessToFP();
@@ -559,6 +553,13 @@
     case kArchStackPointer:
       __ mov(i.OutputRegister(), esp);
       break;
+    case kArchParentFramePointer:
+      if (frame_access_state()->frame()->needs_frame()) {
+        __ mov(i.OutputRegister(), Operand(ebp, 0));
+      } else {
+        __ mov(i.OutputRegister(), ebp);
+      }
+      break;
     case kArchTruncateDoubleToI: {
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fld_d(i.InputOperand(0));
@@ -587,6 +588,18 @@
       __ bind(ool->exit());
       break;
     }
+    case kArchStackSlot: {
+      FrameOffset offset =
+          frame_access_state()->GetFrameOffset(i.InputInt32(0));
+      Register base;
+      if (offset.from_stack_pointer()) {
+        base = esp;
+      } else {
+        base = ebp;
+      }
+      __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+      break;
+    }
     case kX87Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -602,17 +615,37 @@
       }
       break;
     case kX87Cmp:
-      if (HasImmediateInput(instr, 1)) {
-        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ cmp(operand, i.InputImmediate(index));
+        } else {
+          __ cmp(operand, i.InputRegister(index));
+        }
       } else {
-        __ cmp(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ cmp(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ cmp(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kX87Test:
-      if (HasImmediateInput(instr, 1)) {
-        __ test(i.InputOperand(0), i.InputImmediate(1));
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ test(operand, i.InputImmediate(index));
+        } else {
+          __ test(i.InputRegister(index), operand);
+        }
       } else {
-        __ test(i.InputRegister(0), i.InputOperand(1));
+        if (HasImmediateInput(instr, 1)) {
+          __ test(i.InputOperand(0), i.InputImmediate(1));
+        } else {
+          __ test(i.InputRegister(0), i.InputOperand(1));
+        }
       }
       break;
     case kX87Imul:
@@ -1062,6 +1095,66 @@
       __ lea(esp, Operand(esp, kDoubleSize));
       break;
     }
+    case kX87Int32ToFloat32: {
+      InstructionOperand* input = instr->InputAt(0);
+      DCHECK(input->IsRegister() || input->IsStackSlot());
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      if (input->IsRegister()) {
+        Register input_reg = i.InputRegister(0);
+        __ push(input_reg);
+        __ fild_s(Operand(esp, 0));
+        __ pop(input_reg);
+      } else {
+        __ fild_s(i.InputOperand(0));
+      }
+      break;
+    }
+    case kX87Uint32ToFloat32: {
+      InstructionOperand* input = instr->InputAt(0);
+      DCHECK(input->IsRegister() || input->IsStackSlot());
+      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+        __ VerifyX87StackDepth(1);
+      }
+      __ fstp(0);
+      Label msb_set_src;
+      Label jmp_return;
+      // Put the input integer into eax (temporarily).
+      __ push(eax);
+      if (input->IsRegister())
+        __ mov(eax, i.InputRegister(0));
+      else
+        __ mov(eax, i.InputOperand(0));
+
+      __ test(eax, eax);
+      __ j(sign, &msb_set_src, Label::kNear);
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+
+      __ jmp(&jmp_return, Label::kNear);
+      __ bind(&msb_set_src);
+      // Need another temporary register.
+      __ push(ebx);
+      __ mov(ebx, eax);
+      __ shr(eax, 1);
+      // Fold the shifted-out low bit back in to avoid double rounding.
+      __ and_(ebx, Immediate(1));
+      __ or_(eax, ebx);
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
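+      // Duplicate st(0) and add (fld + faddp) to double the halved value.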
+      __ fld(0);
+      __ faddp();
+      // Restore ebx.
+      __ pop(ebx);
+      __ bind(&jmp_return);
+      // Restore eax.
+      __ pop(eax);
+      break;
+    }
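
The msb_set_src path above works around the fact that fild_s converts signed 32-bit integers only: a value with the top bit set is halved with its shifted-out bit folded back in (round to odd), converted, and then doubled. A hedged C++ equivalent of that data flow (assumes two's-complement conversions):

    #include <cstdint>
    #include <cstdio>

    // Portable sketch of the kX87Uint32ToFloat32 sequence above.
    float Uint32ToFloat32(uint32_t x) {
      if (static_cast<int32_t>(x) >= 0) {
        // MSB clear: the plain signed conversion (fild_s) suffices.
        return static_cast<float>(static_cast<int32_t>(x));
      }
      uint32_t halved = (x >> 1) | (x & 1);  // shr + and_ + or_ above
      float f = static_cast<float>(static_cast<int32_t>(halved));
      return f + f;  // fld(0) + faddp: double the halved value
    }

    int main() {
      std::printf("%.1f\n", Uint32ToFloat32(0xFFFFFFFFu));  // 4294967296.0
      return 0;
    }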
     case kX87Int32ToFloat64: {
       InstructionOperand* input = instr->InputAt(0);
       DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1104,6 +1197,36 @@
       __ LoadUint32NoSSE2(i.InputRegister(0));
       break;
     }
+    case kX87Float32ToInt32: {
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fld_s(i.InputOperand(0));
+      }
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fstp(0);
+      }
+      break;
+    }
+    case kX87Float32ToUint32: {
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fld_s(i.InputOperand(0));
+      }
+      Label success;
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      __ test(i.OutputRegister(0), i.OutputRegister(0));
+      __ j(positive, &success);
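+      // A negative result means the input was >= 2^31: bias the input down
+      // by 2^31, truncate again, then set the high bit of the result.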
+      __ push(Immediate(INT32_MIN));
+      __ fild_s(Operand(esp, 0));
+      __ lea(esp, Operand(esp, kPointerSize));
+      __ faddp();
+      __ TruncateX87TOSToI(i.OutputRegister(0));
+      __ or_(i.OutputRegister(0), Immediate(0x80000000));
+      __ bind(&success);
+      if (!instr->InputAt(0)->IsDoubleRegister()) {
+        __ fstp(0);
+      }
+      break;
+    }
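
kX87Float32ToUint32 faces the mirror-image problem: only a signed truncation is available, so inputs at or above 2^31 are biased down by 2^31 before truncating and the high bit is restored afterwards. A sketch of the control flow, with the asm's "result is negative" test replaced by an explicit range check so the C++ stays well-defined (a deliberate simplification, not the literal machine behavior):

    #include <cstdint>
    #include <cstdio>

    // Sketch of the kX87Float32ToUint32 fallback path above.
    uint32_t Float32ToUint32(float input) {
      if (input < 2147483648.0f) {  // stands in for the j(positive) test
        return static_cast<uint32_t>(static_cast<int32_t>(input));
      }
      int32_t biased =
          static_cast<int32_t>(input + static_cast<float>(INT32_MIN));
      return static_cast<uint32_t>(biased) | 0x80000000u;
    }

    int main() {
      std::printf("%u\n", Float32ToUint32(4000000000.0f));  // 4000000000
      return 0;
    }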
     case kX87Float64ToInt32: {
       if (!instr->InputAt(0)->IsDoubleRegister()) {
         __ fld_d(i.InputOperand(0));
@@ -1817,8 +1940,6 @@
     // remaining stack slots.
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
-    // TODO(titzer): cannot address target function == local #-1
-    __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
   }
 
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index b498d9c..e5d0912 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -53,10 +53,14 @@
   V(X87Float64Max)                 \
   V(X87Float64Min)                 \
   V(X87Float64Abs)                 \
+  V(X87Int32ToFloat32)             \
+  V(X87Uint32ToFloat32)            \
   V(X87Int32ToFloat64)             \
   V(X87Float32ToFloat64)           \
   V(X87Uint32ToFloat64)            \
   V(X87Float64ToInt32)             \
+  V(X87Float32ToInt32)             \
+  V(X87Float32ToUint32)            \
   V(X87Float64ToFloat32)           \
   V(X87Float64ToUint32)            \
   V(X87Float64ExtractHighWord32)   \
@@ -84,7 +88,6 @@
   V(X87Poke)                       \
   V(X87StackCheck)
 
-
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
 // are encoded into the InstructionCode of the instruction and tell the
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index cff4aaf..079d5d2 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -151,7 +151,8 @@
     case MachineRepresentation::kWord32:
       opcode = kX87Movl;
       break;
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -236,7 +237,8 @@
       case MachineRepresentation::kWord32:
         opcode = kX87Movl;
         break;
-      case MachineRepresentation::kWord64:  // Fall through.
+      case MachineRepresentation::kWord64:   // Fall through.
+      case MachineRepresentation::kSimd128:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -288,9 +290,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedLoadFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -334,9 +337,10 @@
     case MachineRepresentation::kFloat64:
       opcode = kCheckedStoreFloat64;
       break;
-    case MachineRepresentation::kBit:     // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:  // Fall through.
+    case MachineRepresentation::kBit:      // Fall through.
+    case MachineRepresentation::kTagged:   // Fall through.
+    case MachineRepresentation::kWord64:   // Fall through.
+    case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -469,9 +473,10 @@
 void VisitMulHigh(InstructionSelector* selector, Node* node,
                   ArchOpcode opcode) {
   X87OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUniqueRegister(node->InputAt(1)));
+  InstructionOperand temps[] = {g.TempRegister(eax)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
 }
 
 
@@ -549,6 +554,9 @@
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
 
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -655,6 +663,20 @@
 }
 
 
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Int32ToFloat32, g.DefineAsFixed(node, stX_0),
+       g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Uint32ToFloat32, g.DefineAsFixed(node, stX_0),
+       g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
@@ -669,6 +691,18 @@
 }
 
 
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+  X87OperandGenerator g(this);
+  Emit(kX87Float32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
   X87OperandGenerator g(this);
   Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -959,6 +993,46 @@
 
 namespace {
 
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+                                   InstructionCode opcode, Node* left,
+                                   InstructionOperand right,
+                                   FlagsContinuation* cont) {
+  DCHECK(left->opcode() == IrOpcode::kLoad);
+  X87OperandGenerator g(selector);
+  size_t input_count = 0;
+  InstructionOperand inputs[6];
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+  opcode |= AddressingModeField::encode(addressing_mode);
+  opcode = cont->Encode(opcode);
+  inputs[input_count++] = right;
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
+  } else {
+    DCHECK(cont->IsSet());
+    InstructionOperand output = g.DefineAsRegister(cont->result());
+    selector->Emit(opcode, 1, &output, input_count, inputs);
+  }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+                         Node* node, Node* input) {
+  if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+    return false;
+  }
+  MachineRepresentation load_representation =
+      LoadRepresentationOf(input->op()).representation();
+  if (load_representation == MachineRepresentation::kWord32 ||
+      load_representation == MachineRepresentation::kTagged) {
+    return opcode == kX87Cmp || opcode == kX87Test;
+  }
+  return false;
+}
+
 // Shared routine for multiple compare operations.
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
@@ -1020,26 +1094,41 @@
   }
 }
 
-
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
   X87OperandGenerator g(selector);
-  Node* const left = node->InputAt(0);
-  Node* const right = node->InputAt(1);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right)) {
-    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
-  } else if (g.CanBeImmediate(left)) {
+  // If one of the two inputs is an immediate, make sure it's on the right.
+  if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
     if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
-  } else {
-    VisitCompare(selector, opcode, left, right, cont,
-                 node->op()->HasProperty(Operator::kCommutative));
+    std::swap(left, right);
   }
-}
 
+  // Match immediates on right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    if (CanUseMemoryOperand(selector, opcode, node, left)) {
+      return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                           g.UseImmediate(right), cont);
+    }
+    return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+                        cont);
+  }
+
+  if (g.CanBeBetterLeftOperand(right)) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  if (CanUseMemoryOperand(selector, opcode, node, left)) {
+    return VisitCompareWithMemoryOperand(selector, opcode, left,
+                                         g.UseRegister(right), cont);
+  }
+  return VisitCompare(selector, opcode, left, right, cont,
+                      node->op()->HasProperty(Operator::kCommutative));
+}
 
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       FlagsContinuation* cont) {
diff --git a/src/context-measure.cc b/src/context-measure.cc
index 0b87e39..3423629 100644
--- a/src/context-measure.cc
+++ b/src/context-measure.cc
@@ -30,7 +30,7 @@
   if (object->IsSharedFunctionInfo()) return true;
   if (object->IsScopeInfo()) return true;
   if (object->IsCode() && !Code::cast(object)->is_optimized_code()) return true;
-  if (object->IsExecutableAccessorInfo()) return true;
+  if (object->IsAccessorInfo()) return true;
   if (object->IsWeakCell()) return true;
   return false;
 }
diff --git a/src/contexts-inl.h b/src/contexts-inl.h
index 67257ae..c26ce5b 100644
--- a/src/contexts-inl.h
+++ b/src/contexts-inl.h
@@ -33,7 +33,8 @@
 Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
                                                int i) {
   DCHECK(i < table->used());
-  return Handle<Context>::cast(FixedArray::get(table, i + kFirstContextSlot));
+  return Handle<Context>::cast(
+      FixedArray::get(*table, i + kFirstContextSlot, table->GetIsolate()));
 }
 
 
diff --git a/src/contexts.cc b/src/contexts.cc
index 79a9e92..7549d20 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -79,6 +79,15 @@
   return current;
 }
 
+Context* Context::closure_context() {
+  Context* current = this;
+  while (!current->IsFunctionContext() && !current->IsScriptContext() &&
+         !current->IsNativeContext()) {
+    current = current->previous();
+    DCHECK(current->closure() == closure());
+  }
+  return current;
+}
 
 JSObject* Context::extension_object() {
   DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
@@ -542,16 +551,6 @@
 #undef COMPARE_NAME
 
 
-bool Context::IsJSBuiltin(Handle<Context> native_context,
-                          Handle<JSFunction> function) {
-#define COMPARE_FUNCTION(index, type, name) \
-  if (*function == native_context->get(index)) return true;
-  NATIVE_CONTEXT_JS_BUILTINS(COMPARE_FUNCTION);
-#undef COMPARE_FUNCTION
-  return false;
-}
-
-
 #ifdef DEBUG
 
 bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object* object) {
diff --git a/src/contexts.h b/src/contexts.h
index 6c9e195..38ebf64 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -93,13 +93,8 @@
   V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property)   \
   V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property)   \
   V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments)                 \
-  V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)
-
-
-#define NATIVE_CONTEXT_JS_BUILTINS(V)                   \
-  V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
-    concat_iterable_to_array_builtin)
-
+  V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                   \
+  V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)
 
 #define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                     \
   V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                             \
@@ -145,7 +140,6 @@
   V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                         \
   V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                       \
   V(PROMISE_THEN_INDEX, JSFunction, promise_then)                             \
-  V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate)                       \
   V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)             \
   V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function)     \
   V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                                \
@@ -154,13 +148,14 @@
   V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate)   \
   V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)           \
   V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)               \
-  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)                 \
-  NATIVE_CONTEXT_JS_BUILTINS(V)
+  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
 
 #define NATIVE_CONTEXT_FIELDS(V)                                               \
   V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object)                         \
   V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data)                            \
   /* Below is alpha-sorted */                                                  \
+  V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map,                               \
+    accessor_property_descriptor_map)                                          \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings)    \
   V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
   V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map)                             \
@@ -177,6 +172,7 @@
     call_as_constructor_delegate)                                              \
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate)    \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function)  \
+  V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map)     \
   V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun)                            \
   V(DATE_FUNCTION_INDEX, JSFunction, date_function)                            \
   V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object,                     \
@@ -188,7 +184,8 @@
   V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun)                    \
   V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function)                  \
   V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun)                    \
-  V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache)                     \
+  V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, ObjectHashTable,                      \
+    template_instantiations_cache)                                             \
   V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function)                    \
   V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction,                             \
     generator_function_function)                                               \
@@ -453,6 +450,9 @@
   Context* declaration_context();
   bool is_declaration_context();
 
+  // Get the next closure's context on the context chain.
+  Context* closure_context();
+
   // Returns a JSGlobalProxy object or null.
   JSObject* global_proxy();
   void set_global_proxy(JSObject* global);
@@ -502,9 +502,6 @@
   static int ImportedFieldIndexForName(Handle<String> name);
   static int IntrinsicIndexForName(Handle<String> name);
 
-  static bool IsJSBuiltin(Handle<Context> native_context,
-                          Handle<JSFunction> function);
-
 #define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   inline void set_##name(type* value);                    \
   inline bool is_##name(type* value);                     \
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 3e56799..730e647 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -127,6 +127,10 @@
   return DoubleToUint32(number->Number());
 }
 
+int64_t NumberToInt64(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return static_cast<int64_t>(number->Number());
+}
 
 bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result) {
   SealHandleScope shs(isolate);
diff --git a/src/conversions.h b/src/conversions.h
index 9b6d83b..29262e5 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -172,7 +172,7 @@
 // Convert from Number object to C integer.
 inline int32_t NumberToInt32(Object* number);
 inline uint32_t NumberToUint32(Object* number);
-
+inline int64_t NumberToInt64(Object* number);
 
 double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
                       int flags, double empty_string_val = 0.0);
diff --git a/src/counters.cc b/src/counters.cc
index e8dea2e..a10494e 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -4,6 +4,8 @@
 
 #include "src/counters.h"
 
+#include <algorithm>
+#include <iomanip>
+#include <vector>
+
 #include "src/base/platform/platform.h"
 #include "src/isolate.h"
 #include "src/log-inl.h"
@@ -193,5 +195,141 @@
 #undef HM
 }
 
+class RuntimeCallStatEntries {
+ public:
+  void Print(std::ostream& os) {
+    if (total_call_count == 0) return;
+    std::sort(entries.rbegin(), entries.rend());
+    os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(10)
+       << "Time" << std::setw(18) << "Count" << std::endl
+       << std::string(86, '=') << std::endl;
+    for (Entry& entry : entries) {
+      entry.SetTotal(total_time, total_call_count);
+      entry.Print(os);
+    }
+    os << std::string(86, '-') << std::endl;
+    Entry("Total", total_time, total_call_count).Print(os);
+  }
+
+  void Add(RuntimeCallCounter* counter) {
+    if (counter->count == 0) return;
+    entries.push_back(Entry(counter->name, counter->time, counter->count));
+    total_time += counter->time;
+    total_call_count += counter->count;
+  }
+
+ private:
+  class Entry {
+   public:
+    Entry(const char* name, base::TimeDelta time, uint64_t count)
+        : name_(name),
+          time_(time.InMilliseconds()),
+          count_(count),
+          time_percent_(100),
+          count_percent_(100) {}
+
+    bool operator<(const Entry& other) const {
+      if (time_ < other.time_) return true;
+      if (time_ > other.time_) return false;
+      return count_ < other.count_;
+    }
+
+    void Print(std::ostream& os) {
+      os.precision(2);
+      os << std::fixed;
+      os << std::setw(50) << name_;
+      os << std::setw(8) << time_ << "ms ";
+      os << std::setw(6) << time_percent_ << "%";
+      os << std::setw(10) << count_ << " ";
+      os << std::setw(6) << count_percent_ << "%";
+      os << std::endl;
+    }
+
+    void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
+      if (total_time.InMilliseconds() == 0) {
+        time_percent_ = 0;
+      } else {
+        time_percent_ = 100.0 * time_ / total_time.InMilliseconds();
+      }
+      count_percent_ = 100.0 * count_ / total_count;
+    }
+
+   private:
+    const char* name_;
+    int64_t time_;
+    uint64_t count_;
+    double time_percent_;
+    double count_percent_;
+  };
+
+  uint64_t total_call_count = 0;
+  base::TimeDelta total_time;
+  std::vector<Entry> entries;
+};
+
+void RuntimeCallCounter::Reset() {
+  count = 0;
+  time = base::TimeDelta();
+}
+
+void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
+  Enter(new RuntimeCallTimer(counter, current_timer_));
+}
+
+void RuntimeCallStats::Enter(RuntimeCallTimer* timer) {
+  current_timer_ = timer;
+  current_timer_->Start();
+}
+
+void RuntimeCallStats::Leave() {
+  RuntimeCallTimer* timer = current_timer_;
+  Leave(timer);
+  delete timer;
+}
+
+void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
+  current_timer_ = timer->Stop();
+}
+
+void RuntimeCallStats::Print(std::ostream& os) {
+  RuntimeCallStatEntries entries;
+
+#define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
+  FOR_EACH_INTRINSIC(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+#define PRINT_COUNTER(name, type) entries.Add(&this->Builtin_##name);
+  BUILTIN_LIST_C(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+  entries.Add(&this->ExternalCallback);
+  entries.Add(&this->UnexpectedStubMiss);
+
+  entries.Print(os);
+}
+
+void RuntimeCallStats::Reset() {
+#define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
+  FOR_EACH_INTRINSIC(RESET_COUNTER)
+#undef RESET_COUNTER
+#define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
+  BUILTIN_LIST_C(RESET_COUNTER)
+#undef RESET_COUNTER
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
+                                             RuntimeCallCounter* counter)
+    : isolate_(isolate),
+      timer_(counter,
+             isolate->counters()->runtime_call_stats()->current_timer()) {
+  if (!FLAG_runtime_call_stats) return;
+  isolate->counters()->runtime_call_stats()->Enter(&timer_);
+}
+
+RuntimeCallTimerScope::~RuntimeCallTimerScope() {
+  if (!FLAG_runtime_call_stats) return;
+  isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/counters.h b/src/counters.h
index d8a3f09..a417da3 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -9,8 +9,10 @@
 #include "src/allocation.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/time.h"
+#include "src/builtins.h"
 #include "src/globals.h"
 #include "src/objects.h"
+#include "src/runtime/runtime.h"
 
 namespace v8 {
 namespace internal {
@@ -196,6 +198,8 @@
     lookup_done_ = false;
   }
 
+  const char* name() { return name_; }
+
  protected:
   // Returns the handle to the histogram.
   void* GetHistogram() {
@@ -206,7 +210,6 @@
     return histogram_;
   }
 
-  const char* name() { return name_; }
   Isolate* isolate() const { return isolate_; }
 
  private:
@@ -328,8 +331,9 @@
   base::TimeDelta time_;
 };
 
-
-// A helper class for use with AggregatableHistogramTimer.
+// A helper class for use with AggregatableHistogramTimer. This is the
+// outer-most timer scope used with an AggregatableHistogramTimer. It will
+// aggregate the information from the inner AggregatedHistogramTimerScope.
 class AggregatingHistogramTimerScope {
  public:
   explicit AggregatingHistogramTimerScope(AggregatableHistogramTimer* histogram)
@@ -342,8 +346,8 @@
   AggregatableHistogramTimer* histogram_;
 };
 
-
-// A helper class for use with AggregatableHistogramTimer.
+// A helper class for use with AggregatableHistogramTimer, the "inner" scope
+// which defines the events to be timed.
 class AggregatedHistogramTimerScope {
  public:
   explicit AggregatedHistogramTimerScope(AggregatableHistogramTimer* histogram)
@@ -475,6 +479,91 @@
          value * ((current_ms - last_ms_) / interval_ms);
 }
 
+struct RuntimeCallCounter {
+  explicit RuntimeCallCounter(const char* name) : name(name) {}
+  void Reset();
+
+  const char* name;
+  int64_t count = 0;
+  base::TimeDelta time;
+};
+
+// RuntimeCallTimer is used to keep track of the stack of currently active
+// timers used for properly measuring the own time of a RuntimeCallCounter.
+class RuntimeCallTimer {
+ public:
+  RuntimeCallTimer(RuntimeCallCounter* counter, RuntimeCallTimer* parent)
+      : counter_(counter), parent_(parent) {}
+
+  inline void Start() {
+    timer_.Start();
+    counter_->count++;
+  }
+
+  inline RuntimeCallTimer* Stop() {
+    base::TimeDelta delta = timer_.Elapsed();
+    counter_->time += delta;
+    if (parent_ != NULL) {
+      parent_->AdjustForSubTimer(delta);
+    }
+    return parent_;
+  }
+
+  void AdjustForSubTimer(base::TimeDelta delta) { counter_->time -= delta; }
+
+ private:
+  RuntimeCallCounter* counter_;
+  RuntimeCallTimer* parent_;
+  base::ElapsedTimer timer_;
+};
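+
The Stop()/AdjustForSubTimer pair above is what turns wall time into own time: a timer charges its whole elapsed interval to its counter, then refunds that interval to its parent. A minimal self-contained sketch of that bookkeeping, using a fake integer clock and illustrative names in place of the V8 types:

    #include <cstdio>

    struct Counter { const char* name; long time = 0; };

    struct Timer {
      Counter* counter;
      Timer* parent;
      long start;
    };

    long now = 0;  // fake clock, advanced manually below

    Timer* Start(Counter* c, Timer* parent) { return new Timer{c, parent, now}; }

    // Mirrors RuntimeCallTimer::Stop(): charge the full delta to this
    // counter, then refund it to the parent (AdjustForSubTimer) so only
    // self time remains.
    Timer* Stop(Timer* t) {
      long delta = now - t->start;
      t->counter->time += delta;
      if (t->parent != nullptr) t->parent->counter->time -= delta;
      Timer* parent = t->parent;
      delete t;
      return parent;
    }

    int main() {
      Counter outer{"outer"}, inner{"inner"};
      Timer* current = Start(&outer, nullptr);
      now += 3;                          // work in the outer function
      current = Start(&inner, current);  // nested call
      now += 5;                          // work in the inner function
      current = Stop(current);
      now += 2;                          // more work in the outer function
      Stop(current);
      std::printf("outer=%ld inner=%ld\n", outer.time, inner.time);
      // Prints "outer=5 inner=5": each counter holds own time, not total.
      return 0;
    }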
+
+struct RuntimeCallStats {
+  // Dummy counter for the unexpected stub miss.
+  RuntimeCallCounter UnexpectedStubMiss =
+      RuntimeCallCounter("UnexpectedStubMiss");
+  // Counter for runtime callbacks into JavaScript.
+  RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
+  RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
+  FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name, type) \
+  RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
+  BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+
+  // Counter to track recursive time events.
+  RuntimeCallTimer* current_timer_ = NULL;
+
+  // Start measuring time for a function. This establishes the connection to
+  // the parent timer so that own time is attributed correctly.
+  void Enter(RuntimeCallCounter* counter);
+  void Enter(RuntimeCallTimer* timer);
+  // Leave a scope for a measured runtime function. This adds the time delta
+  // to the current counter and subtracts it from the parent counter.
+  void Leave();
+  void Leave(RuntimeCallTimer* timer);
+
+  RuntimeCallTimer* current_timer() { return current_timer_; }
+
+  void Reset();
+  void Print(std::ostream& os);
+
+  RuntimeCallStats() { Reset(); }
+};
+
+// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
+// time spent in a C++ scope.
+class RuntimeCallTimerScope {
+ public:
+  explicit RuntimeCallTimerScope(Isolate* isolate, RuntimeCallCounter* counter);
+  ~RuntimeCallTimerScope();
+
+ private:
+  Isolate* isolate_;
+  RuntimeCallTimer timer_;
+};
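+
A hypothetical usage sketch for the scope class (Runtime_ExampleFunction is an invented name; the counter and accessor come from the declarations added in this patch, so this is not standalone-runnable outside V8):

    // Charges the elapsed time of this scope to the ExternalCallback
    // counter, but only when --runtime-call-stats is enabled.
    void Runtime_ExampleFunction(Isolate* isolate) {
      RuntimeCallTimerScope timer_scope(
          isolate,
          &isolate->counters()->runtime_call_stats()->ExternalCallback);
      // ... actual work ...
    }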
 
 #define HISTOGRAM_RANGE_LIST(HR)                                              \
   /* Generic range histograms */                                              \
@@ -563,7 +652,9 @@
   SC(global_handles, V8.GlobalHandles)                                \
   /* OS Memory allocated */                                           \
   SC(memory_allocated, V8.OsMemoryAllocated)                          \
-  SC(normalized_maps, V8.NormalizedMaps)                              \
+  SC(maps_normalized, V8.MapsNormalized)                              \
+  SC(maps_created, V8.MapsCreated)                                    \
+  SC(elements_transitions, V8.ObjectElementsTransitions)              \
   SC(props_to_dictionary, V8.ObjectPropertiesToDictionary)            \
   SC(elements_to_dictionary, V8.ObjectElementsToDictionary)           \
   SC(alive_after_last_gc, V8.AliveAfterLastGC)                        \
@@ -572,11 +663,7 @@
   SC(string_table_capacity, V8.StringTableCapacity)                   \
   SC(number_of_symbols, V8.NumberOfSymbols)                           \
   SC(script_wrappers, V8.ScriptWrappers)                              \
-  SC(call_initialize_stubs, V8.CallInitializeStubs)                   \
-  SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)           \
-  SC(call_normal_stubs, V8.CallNormalStubs)                           \
-  SC(call_megamorphic_stubs, V8.CallMegamorphicStubs)                 \
-  SC(inlined_copied_elements, V8.InlinedCopiedElements)              \
+  SC(inlined_copied_elements, V8.InlinedCopiedElements)               \
   SC(arguments_adaptors, V8.ArgumentsAdaptors)                        \
   SC(compilation_cache_hits, V8.CompilationCacheHits)                 \
   SC(compilation_cache_misses, V8.CompilationCacheMisses)             \
@@ -588,8 +675,6 @@
   SC(total_parse_size, V8.TotalParseSize)                             \
   /* Amount of source code skipped over using preparsing. */          \
   SC(total_preparse_skipped, V8.TotalPreparseSkipped)                 \
-  /* Number of symbol lookups skipped using preparsing */             \
-  SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped)   \
   /* Amount of compiled source code. */                               \
   SC(total_compile_size, V8.TotalCompileSize)                         \
   /* Amount of source code compiled with the full codegen. */         \
@@ -602,7 +687,6 @@
   SC(pc_to_code, V8.PcToCode)                                         \
   SC(pc_to_code_cached, V8.PcToCodeCached)                            \
   /* The store-buffer implementation of the write barrier. */         \
-  SC(store_buffer_compactions, V8.StoreBufferCompactions)             \
   SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
 
@@ -619,40 +703,22 @@
      V8.GCCompactorCausedByOldspaceExhaustion)                                 \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                            \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)                  \
-  /* How is the generic keyed-load stub used? */                               \
-  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                           \
-  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)                     \
-  SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache)          \
-  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                         \
-  SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs)               \
-  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)            \
-  /* How is the generic keyed-call stub used? */                               \
-  SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast)                  \
-  SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict)                  \
-  SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache)          \
-  SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict)            \
-  SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow)                         \
-  SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad)                \
-  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                           \
-  SC(named_store_global_inline, V8.NamedStoreGlobalInline)                     \
-  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)            \
-  SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)             \
-  SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow)          \
-  SC(store_normal_miss, V8.StoreNormalMiss)                                    \
-  SC(store_normal_hit, V8.StoreNormalHit)                                      \
-  SC(cow_arrays_created_stub, V8.COWArraysCreatedStub)                         \
+  SC(ic_keyed_load_generic_smi, V8.ICKeyedLoadGenericSmi)                      \
+  SC(ic_keyed_load_generic_symbol, V8.ICKeyedLoadGenericSymbol)                \
+  SC(ic_keyed_load_generic_slow, V8.ICKeyedLoadGenericSlow)                    \
+  SC(ic_named_load_global_stub, V8.ICNamedLoadGlobalStub)                      \
+  SC(ic_store_normal_miss, V8.ICStoreNormalMiss)                               \
+  SC(ic_store_normal_hit, V8.ICStoreNormalHit)                                 \
+  SC(ic_binary_op_miss, V8.ICBinaryOpMiss)                                     \
+  SC(ic_compare_miss, V8.ICCompareMiss)                                        \
+  SC(ic_call_miss, V8.ICCallMiss)                                              \
+  SC(ic_keyed_call_miss, V8.ICKeyedCallMiss)                                   \
+  SC(ic_load_miss, V8.ICLoadMiss)                                              \
+  SC(ic_keyed_load_miss, V8.ICKeyedLoadMiss)                                   \
+  SC(ic_store_miss, V8.ICStoreMiss)                                            \
+  SC(ic_keyed_store_miss, V8.ICKeyedStoreMiss)                                 \
   SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime)                   \
   SC(cow_arrays_converted, V8.COWArraysConverted)                              \
-  SC(call_miss, V8.CallMiss)                                                   \
-  SC(keyed_call_miss, V8.KeyedCallMiss)                                        \
-  SC(load_miss, V8.LoadMiss)                                                   \
-  SC(keyed_load_miss, V8.KeyedLoadMiss)                                        \
-  SC(call_const, V8.CallConst)                                                 \
-  SC(call_const_fast_api, V8.CallConstFastApi)                                 \
-  SC(call_const_interceptor, V8.CallConstInterceptor)                          \
-  SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi)          \
-  SC(call_global_inline, V8.CallGlobalInline)                                  \
-  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)                         \
   SC(constructed_objects, V8.ConstructedObjects)                               \
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)                \
   SC(negative_lookups, V8.NegativeLookups)                                     \
@@ -660,8 +726,6 @@
   SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)             \
   SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses)             \
   SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates)           \
-  SC(array_function_runtime, V8.ArrayFunctionRuntime)                          \
-  SC(array_function_native, V8.ArrayFunctionNative)                            \
   SC(enum_cache_hits, V8.EnumCacheHits)                                        \
   SC(enum_cache_misses, V8.EnumCacheMisses)                                    \
   SC(fast_new_closure_total, V8.FastNewClosureTotal)                           \
@@ -672,26 +736,26 @@
   SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte)      \
   SC(sub_string_runtime, V8.SubStringRuntime)                                  \
   SC(sub_string_native, V8.SubStringNative)                                    \
-  SC(string_add_make_two_char, V8.StringAddMakeTwoChar)                        \
   SC(string_compare_native, V8.StringCompareNative)                            \
   SC(string_compare_runtime, V8.StringCompareRuntime)                          \
   SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                              \
   SC(regexp_entry_native, V8.RegExpEntryNative)                                \
   SC(number_to_string_native, V8.NumberToStringNative)                         \
   SC(number_to_string_runtime, V8.NumberToStringRuntime)                       \
-  SC(math_acos, V8.MathAcos)                                                   \
-  SC(math_asin, V8.MathAsin)                                                   \
-  SC(math_atan, V8.MathAtan)                                                   \
-  SC(math_atan2, V8.MathAtan2)                                                 \
-  SC(math_clz32, V8.MathClz32)                                                 \
-  SC(math_exp, V8.MathExp)                                                     \
-  SC(math_floor, V8.MathFloor)                                                 \
-  SC(math_log, V8.MathLog)                                                     \
-  SC(math_pow, V8.MathPow)                                                     \
-  SC(math_round, V8.MathRound)                                                 \
-  SC(math_sqrt, V8.MathSqrt)                                                   \
+  SC(math_acos_runtime, V8.MathAcosRuntime)                                    \
+  SC(math_asin_runtime, V8.MathAsinRuntime)                                    \
+  SC(math_atan_runtime, V8.MathAtanRuntime)                                    \
+  SC(math_atan2_runtime, V8.MathAtan2Runtime)                                  \
+  SC(math_clz32_runtime, V8.MathClz32Runtime)                                  \
+  SC(math_exp_runtime, V8.MathExpRuntime)                                      \
+  SC(math_floor_runtime, V8.MathFloorRuntime)                                  \
+  SC(math_log_runtime, V8.MathLogRuntime)                                      \
+  SC(math_pow_runtime, V8.MathPowRuntime)                                      \
+  SC(math_round_runtime, V8.MathRoundRuntime)                                  \
+  SC(math_sqrt_runtime, V8.MathSqrtRuntime)                                    \
   SC(stack_interrupts, V8.StackInterrupts)                                     \
   SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                          \
+  SC(runtime_calls, V8.RuntimeCalls)                                           \
   SC(bounds_checks_eliminated, V8.BoundsChecksEliminated)                      \
   SC(bounds_checks_hoisted, V8.BoundsChecksHoisted)                            \
   SC(soft_deopts_requested, V8.SoftDeoptsRequested)                            \
@@ -718,8 +782,11 @@
   SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced)               \
   SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced)     \
   SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced)                 \
-  SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)
-
+  SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)       \
+  /* Total code size (including metadata) of baseline code or bytecode. */     \
+  SC(total_baseline_code_size, V8.TotalBaselineCodeSize)                       \
+  /* Total count of functions compiled using the baseline compiler. */         \
+  SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
 
 // This file contains all the v8 counters that are in use.
 class Counters {
@@ -831,6 +898,7 @@
 
   void ResetCounters();
   void ResetHistograms();
+  RuntimeCallStats* runtime_call_stats() { return &runtime_call_stats_; }
 
  private:
 #define HR(name, caption, min, max, num_buckets) Histogram name##_;
@@ -892,6 +960,8 @@
   CODE_AGE_LIST_COMPLETE(SC)
 #undef SC
 
+  RuntimeCallStats runtime_call_stats_;
+
   friend class Isolate;
 
   explicit Counters(Isolate* isolate);
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
index cd736ec..d5590f5 100644
--- a/src/crankshaft/arm/lithium-arm.cc
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -383,8 +383,8 @@
 
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot if for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
-  return spill_slot_count_++;
+  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+  return current_frame_slots_++;
 }
 
 
@@ -1743,14 +1743,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1819,12 +1811,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2449,8 +2435,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2471,17 +2456,12 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2622,15 +2602,5 @@
   return new(zone()) LStoreFrameContext(context);
 }
 
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
index 6329f36..91435cf 100644
--- a/src/crankshaft/arm/lithium-arm.h
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -21,7 +21,6 @@
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -35,7 +34,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckNonSmi)                             \
@@ -47,7 +45,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -101,7 +98,6 @@
   V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
@@ -460,19 +456,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -992,22 +975,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1158,8 +1125,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
@@ -1361,18 +1326,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1498,8 +1451,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2616,23 +2567,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index 2bd0788..7b2ebad 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -59,7 +59,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -103,13 +103,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ stop("stop_at");
-    }
-#endif
-
     // r1: Callee's JS function.
     // cp: Callee's context.
     // pp: Callee's constant pool pointer (if enabled)
@@ -382,7 +375,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -554,7 +547,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()));
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
@@ -566,7 +559,7 @@
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
@@ -635,9 +628,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -647,9 +637,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -979,26 +966,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(r0));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1803,13 +1770,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                            LOperand* index,
                                            String::Encoding encoding) {
@@ -1946,8 +1906,14 @@
     // At this point, both left and right are either 0 or -0.
     if (operation == HMathMinMax::kMathMin) {
       // We could use a single 'vorr' instruction here if we had NEON support.
+      // The algorithm is: -((-L) + (-R)), which in case of L and R being
+      // different registers is most efficiently expressed as -((-L) - R).
       __ vneg(left_reg, left_reg);
-      __ vsub(result_reg, left_reg, right_reg);
+      if (left_reg.is(right_reg)) {
+        __ vadd(result_reg, left_reg, right_reg);
+      } else {
+        __ vsub(result_reg, left_reg, right_reg);
+      }
       __ vneg(result_reg, result_reg);
     } else {
       // Since we operate on +0 and/or -0, vadd and vand have the same effect;
@@ -2014,8 +1980,7 @@
   DCHECK(ToRegister(instr->right()).is(r0));
   DCHECK(ToRegister(instr->result()).is(r0));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   // Block literal pool emission to ensure nop indicating no inlined smi code
   // is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2260,8 +2225,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2326,33 +2292,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    DwVfpRegister value = ToDoubleRegister(instr->value());
-    __ VFPCompareAndSetFlags(value, 0.0);
-    EmitFalseBranch(instr, ne);
-    __ VmovHigh(scratch, value);
-    __ cmp(scratch, Operand(0x80000000));
-  } else {
-    Register value = ToRegister(instr->value());
-    __ CheckMap(value,
-                scratch,
-                Heap::kHeapNumberMapRootIndex,
-                instr->FalseLabel(chunk()),
-                DO_SMI_CHECK);
-    __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
-    __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-    __ cmp(scratch, Operand(0x80000000));
-    __ cmp(ip, Operand(0x00000000), eq);
-  }
-  EmitBranch(instr, eq);
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2616,8 +2555,7 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals no smi code inlined.
   __ cmp(r0, Operand::Zero());
@@ -2707,9 +2645,9 @@
 
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2804,10 +2742,10 @@
   // Name is always in r2.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -2952,6 +2890,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3103,8 +3044,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -3718,21 +3659,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(r1));
   DCHECK(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(r3));
     DCHECK(vector_register.is(r2));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ Move(vector_register, vector);
     __ mov(slot_register, Operand(Smi::FromInt(index)));
@@ -4018,6 +3960,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -5271,8 +5216,8 @@
     final_branch_condition = eq;
 
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
-    __ b(eq, true_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ b(eq, false_label);
     __ JumpIfSmi(input, false_label);
     // Check for undetectable objects => true.
     __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -5450,17 +5395,8 @@
 
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  __ SmiTst(r0);
-  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
-
   Label use_cache, call_runtime;
-  Register null_value = r5;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(null_value, &call_runtime);
+  __ CheckEnumCache(&call_runtime);
 
   __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ b(&use_cache);
@@ -5468,12 +5404,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(r0);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
-  __ cmp(r1, ip);
-  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5592,15 +5523,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
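[Annotation, not part of the patch.] The DoMathMinMax hunk above handles the corner case where both inputs are +/-0 via the identity min(L, R) == -((-L) - R), falling back to -((-L) + (-R)) when L and R share a register. The identity only holds for signed zeros, which is the only case that code path sees. A standalone check over all four signed-zero combinations, plain C++ rather than V8 code:

#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  const double zeros[] = {0.0, -0.0};
  for (double l : zeros) {
    for (double r : zeros) {
      double min_via_identity = -((-l) - r);
      // min(+/-0, +/-0) is -0 exactly when either input is -0.
      bool expect_negative = std::signbit(l) || std::signbit(r);
      assert(std::signbit(min_via_identity) == expect_negative);
      std::printf("min(%+.1f, %+.1f) = %+.1f\n", l, r, min_via_identity);
    }
  }
  return 0;
}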
diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h
index 24a083f..67925cc 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/src/crankshaft/arm/lithium-codegen-arm.h
@@ -44,10 +44,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -153,7 +151,13 @@
                        Register temporary,
                        Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
index 3f43338..c5d4208 100644
--- a/src/crankshaft/arm64/lithium-arm64.cc
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -527,11 +527,7 @@
   return operand;
 }
 
-
-int LPlatformChunk::GetNextSpillIndex() {
-  return spill_slot_count_++;
-}
-
+int LPlatformChunk::GetNextSpillIndex() { return current_frame_slots_++; }
 
 LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
   int index = GetNextSpillIndex();
@@ -1084,12 +1080,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
   instr->ReplayEnvironment(current_block_->last_environment());
 
@@ -1567,14 +1557,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1743,12 +1725,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
   DCHECK(instr->representation().IsInteger32());
   DCHECK(instr->left()->representation().Equals(instr->representation()));
@@ -1966,8 +1942,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2662,6 +2637,7 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
@@ -2719,15 +2695,5 @@
 }
 
 
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
-
 }  // namespace internal
 }  // namespace v8
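[Annotation, not part of the patch.] The lithium changes in the files above replace per-use fixed-frame adjustments (the deleted "index += StandardFrameConstants::kFixedFrameSize / kPointerSize" blocks) with frame-slot indices that count the fixed frame from the start: GetNextSpillIndex() now bumps current_frame_slots_, and OSR values get kFixedSlotCount added exactly once. A simplified model of that accounting, with an illustrative fixed-slot count and hypothetical "Sketch" names:

#include <cassert>

struct LChunkSketch {
  static constexpr int kFixedSlotCount = 4;    // illustrative, not ARM's value
  int current_frame_slots_ = kFixedSlotCount;  // fixed frame pre-counted
  int GetNextSpillIndex() { return current_frame_slots_++; }
  int GetTotalFrameSlotCount() const { return current_frame_slots_; }
  int GetSpillSlotCount() const {
    return current_frame_slots_ - kFixedSlotCount;
  }
  bool HasAllocatedStackSlots() const { return GetSpillSlotCount() > 0; }
};

int main() {
  LChunkSketch chunk;
  assert(!chunk.HasAllocatedStackSlots());
  int first_spill = chunk.GetNextSpillIndex();
  // Spill indices start above the fixed slots, so no call site needs the
  // old kFixedFrameSize / kPointerSize adjustment anymore.
  assert(first_spill == LChunkSketch::kFixedSlotCount);
  assert(chunk.GetTotalFrameSlotCount() == LChunkSketch::kFixedSlotCount + 1);
  return 0;
}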
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
index 1b627d1..14abeb0 100644
--- a/src/crankshaft/arm64/lithium-arm64.h
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -23,7 +23,6 @@
   V(AddI)                                    \
   V(AddS)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -37,7 +36,6 @@
   V(CallJSFunction)                          \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CallWithDescriptor)                      \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
@@ -55,7 +53,6 @@
   V(CmpMapAndBranch)                         \
   V(CmpObjectEqAndBranch)                    \
   V(CmpT)                                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(ConstantD)                               \
   V(ConstantE)                               \
@@ -106,7 +103,6 @@
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathAbsTagged)                           \
   V(MathClz32)                               \
@@ -718,8 +714,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -887,19 +881,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LCheckArrayBufferNotNeutered final
     : public LTemplateInstruction<0, 1, 0> {
  public:
@@ -1141,28 +1122,10 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
@@ -1791,18 +1754,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 template<int T>
 class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
  public:
@@ -2949,23 +2900,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
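[Annotation, not part of the patch.] The V(...) edits in this header touch X-macro instruction lists: deleting a single line such as V(CallStub) removes that opcode from every expansion site (enum entries, dispatch tables, name strings) at once, which is why each dropped instruction shows up as exactly one removed line per list. A toy rendering of the technique:

#include <cstdio>

#define TOY_INSTRUCTION_LIST(V) \
  V(Add)                        \
  V(Sub)                        \
  V(Mul)

enum class ToyOpcode {
#define DECLARE_OPCODE(name) k##name,
  TOY_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

const char* ToyOpcodeName(ToyOpcode op) {
  switch (op) {
#define OPCODE_NAME(name)  \
  case ToyOpcode::k##name: \
    return #name;
    TOY_INSTRUCTION_LIST(OPCODE_NAME)
#undef OPCODE_NAME
  }
  return "unknown";
}

int main() {
  std::printf("%s\n", ToyOpcodeName(ToyOpcode::kSub));  // prints "Sub"
  return 0;
}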
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 571bc15..6399a8b 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -277,9 +277,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -289,9 +286,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -366,21 +360,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).Is(x1));
   DCHECK(ToRegister(instr->result()).Is(x0));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(x3));
     DCHECK(vector_register.is(x2));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ Mov(vector_register, vector);
     __ Mov(slot_register, Operand(Smi::FromInt(index)));
@@ -392,7 +387,7 @@
     __ Mov(x0, arity);
     CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
   }
-  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+  RecordPushedArgumentsDelta(hinstr->argument_delta());
 }
 
 
@@ -605,13 +600,6 @@
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ Debug("stop-at", __LINE__, BREAK);
-    }
-#endif
   }
 
   DCHECK(__ StackPointer().Is(jssp));
@@ -870,14 +858,14 @@
   // We do not know how much data will be emitted for the safepoint table, so
   // force emission of the veneer pool.
   masm()->CheckVeneerPool(true, true);
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -1161,7 +1149,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    int fp_offset = StackSlotOffset(op->index());
+    int fp_offset = FrameSlotToFPOffset(op->index());
     // Loads and stores have a bigger reach in positive offset than negative.
     // We try to access using jssp (positive offset) first, then fall back to
     // fp (negative offset) if that fails.
@@ -1178,8 +1166,8 @@
     if ((stack_mode == kCanUseStackPointer) &&
         !info()->saves_caller_doubles()) {
       int jssp_offset_to_fp =
-          StandardFrameConstants::kFixedFrameSizeFromFp +
-          (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
+          (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
+          StandardFrameConstants::kFixedFrameSizeAboveFp;
       int jssp_offset = fp_offset + jssp_offset_to_fp;
       if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
         return MemOperand(masm()->StackPointer(), jssp_offset);
@@ -1673,8 +1661,7 @@
   DCHECK(ToRegister(instr->right()).is(x0));
   DCHECK(ToRegister(instr->result()).is(x0));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -1998,27 +1985,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(x0));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -2330,24 +2296,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
-                       instr->TrueLabel(chunk()));
-  } else {
-    Register value = ToRegister(instr->value());
-    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
-    __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
-    __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
-  }
-  EmitGoto(instr->FalseDestination(chunk()));
-}
-
-
 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
@@ -2360,8 +2308,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2422,8 +2371,7 @@
 
   DCHECK(ToRegister(instr->left()).Is(x1));
   DCHECK(ToRegister(instr->right()).Is(x0));
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // Signal that we don't inline smi code before this stub.
   InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2715,20 +2663,12 @@
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   Register object = ToRegister(instr->object());
-  Register null_value = x5;
 
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(object.Is(x0));
 
-  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
-
   Label use_cache, call_runtime;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+  __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime);
 
   __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   __ B(&use_cache);
@@ -2736,12 +2676,7 @@
   // Get the set of properties to enumerate.
   __ Bind(&call_runtime);
   __ Push(object);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
-                      Deoptimizer::kWrongMap);
-
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ Bind(&use_cache);
 }
 
@@ -3084,9 +3019,9 @@
   DCHECK(ToRegister(instr->result()).Is(x0));
   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3194,6 +3129,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3346,8 +3284,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3401,10 +3339,10 @@
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).is(x0));
@@ -3417,13 +3355,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLengthSmi(result, map);
-}
-
-
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsDouble()) {
@@ -4889,6 +4820,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -5578,7 +5512,7 @@
     DCHECK(instr->temp1() != NULL);
     Register scratch = ToRegister(instr->temp1());
 
-    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+    __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);
     __ JumpIfSmi(value, false_label);
     // Check for undetectable objects and jump to the true branch in this case.
     __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
@@ -5775,14 +5709,5 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ Push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 }  // namespace internal
 }  // namespace v8
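[Annotation, not part of the patch.] In both DoTypeofIsAndBranch hunks above, typeof x == "undefined" no longer compares against the undefined root; it rejects null, then smis, then accepts anything whose map has the undetectable bit set. The diff suggests null's map is now also marked undetectable in this V8 version (presumably so other checks can treat null and undefined uniformly), which would explain why the explicit null rejection has to come first. A toy model of the new check order, with invented field names:

#include <cstdio>

struct ToyValue {
  bool is_null = false;
  bool is_smi = false;
  bool map_is_undetectable = false;
};

// typeof v == "undefined", in the order the new code tests it.
bool TypeofIsUndefinedSketch(const ToyValue& v) {
  if (v.is_null) return false;   // null is filtered out before the map check
  if (v.is_smi) return false;    // smis are numbers, never undefined
  return v.map_is_undetectable;  // undefined (and document.all-style objects)
}

int main() {
  ToyValue undefined_value{false, false, true};
  ToyValue null_value{true, false, true};  // assumes null's map has the bit
  std::printf("%d %d\n", TypeofIsUndefinedSketch(undefined_value),
              TypeofIsUndefinedSketch(null_value));  // prints "1 0"
  return 0;
}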
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h
index 18856da..cf7de10 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -49,10 +49,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -253,7 +251,13 @@
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/hydrogen-alias-analysis.h b/src/crankshaft/hydrogen-alias-analysis.h
index de8d0bd..1f32b7ac 100644
--- a/src/crankshaft/hydrogen-alias-analysis.h
+++ b/src/crankshaft/hydrogen-alias-analysis.h
@@ -44,7 +44,6 @@
 
     // Constant objects can be distinguished statically.
     if (a->IsConstant()) {
-      // TODO(titzer): DataEquals() is more efficient, but that's protected.
       return a->Equals(b) ? kMustAlias : kNoAlias;
     }
     return kMayAlias;
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index e2e026f..a9c6228 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -765,7 +765,6 @@
 
 
 bool HInstruction::CanDeoptimize() {
-  // TODO(titzer): make this a virtual method?
   switch (opcode()) {
     case HValue::kAbnormalExit:
     case HValue::kAccessArgumentsAt:
@@ -777,13 +776,11 @@
     case HValue::kBoundsCheckBaseIndexInformation:
     case HValue::kCallFunction:
     case HValue::kCallNewArray:
-    case HValue::kCallStub:
     case HValue::kCapturedObject:
     case HValue::kClassOfTestAndBranch:
     case HValue::kCompareGeneric:
     case HValue::kCompareHoleAndBranch:
     case HValue::kCompareMap:
-    case HValue::kCompareMinusZeroAndBranch:
     case HValue::kCompareNumericAndBranch:
     case HValue::kCompareObjectEqAndBranch:
     case HValue::kConstant:
@@ -811,7 +808,6 @@
     case HValue::kLoadNamedField:
     case HValue::kLoadNamedGeneric:
     case HValue::kLoadRoot:
-    case HValue::kMapEnumLength:
     case HValue::kMathMinMax:
     case HValue::kParameter:
     case HValue::kPhi:
@@ -832,7 +828,6 @@
       return false;
 
     case HValue::kAdd:
-    case HValue::kAllocateBlockContext:
     case HValue::kApplyArguments:
     case HValue::kBitwise:
     case HValue::kBoundsCheck:
@@ -1107,12 +1102,6 @@
 }
 
 
-std::ostream& HAllocateBlockContext::PrintDataTo(
-    std::ostream& os) const {  // NOLINT
-  return os << NameOf(context()) << " " << NameOf(function());
-}
-
-
 std::ostream& HControlInstruction::PrintDataTo(
     std::ostream& os) const {  // NOLINT
   os << " goto (";
@@ -1428,12 +1417,12 @@
 
 // static
 HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
-                        HValue* left, HValue* right, Strength strength,
+                        HValue* left, HValue* right,
                         ExternalAddType external_add_type) {
   // For everything else, you should use the other factory method without
   // ExternalAddType.
   DCHECK_EQ(external_add_type, AddOfExternalAndTagged);
-  return new (zone) HAdd(context, left, right, strength, external_add_type);
+  return new (zone) HAdd(context, left, right, external_add_type);
 }
 
 
@@ -1733,12 +1722,6 @@
 }
 
 
-std::ostream& HCallStub::PrintDataTo(std::ostream& os) const {  // NOLINT
-  os << CodeStub::MajorName(major_key_) << " ";
-  return HUnaryCall::PrintDataTo(os);
-}
-
-
 std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const {  // NOLINT
   const char* type = "expression";
   if (environment_->is_local_index(index_)) type = "local";
@@ -2733,7 +2716,6 @@
         bit_field_, has_int32_value && Smi::IsValid(int32_value_));
     double_value_ = n;
     bit_field_ = HasDoubleValueField::update(bit_field_, true);
-    // TODO(titzer): if this heap number is new space, tenure a new one.
   }
 
   Initialize(r);
@@ -2906,7 +2888,6 @@
   DCHECK(IsLinked());
   if (block()->graph()->has_osr() &&
       block()->graph()->IsStandardConstant(this)) {
-    // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
     return true;
   }
   if (HasNoUses()) return true;
@@ -2963,6 +2944,8 @@
     res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
   } else if (handle->IsNull()) {
     res = new(zone) HConstant(0);
+  } else if (handle->IsString()) {
+    res = new(zone) HConstant(String::ToNumber(Handle<String>::cast(handle)));
   }
   return res != NULL ? Just(res) : Nothing<HConstant*>();
 }
@@ -3327,31 +3310,6 @@
 }
 
 
-bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
-  if (FLAG_fold_constants && value()->IsConstant()) {
-    HConstant* constant = HConstant::cast(value());
-    if (constant->HasDoubleValue()) {
-      *block = IsMinusZero(constant->DoubleValue())
-          ? FirstSuccessor() : SecondSuccessor();
-      return true;
-    }
-  }
-  if (value()->representation().IsSmiOrInteger32()) {
-    // A Smi or Integer32 cannot contain minus zero.
-    *block = SecondSuccessor();
-    return true;
-  }
-  *block = NULL;
-  return false;
-}
-
-
-void HCompareMinusZeroAndBranch::InferRepresentation(
-    HInferRepresentationPhase* h_infer) {
-  ChangeRepresentation(value()->representation());
-}
-
-
 std::ostream& HGoto::PrintDataTo(std::ostream& os) const {  // NOLINT
   return os << *SuccessorAt(0);
 }
@@ -3388,7 +3346,7 @@
     // (false). Therefore, any comparisons other than ordered relational
     // comparisons must cause a deopt when one of their arguments is undefined.
     // See also v8:1434
-    if (Token::IsOrderedRelationalCompareOp(token_) && !is_strong(strength())) {
+    if (Token::IsOrderedRelationalCompareOp(token_)) {
       SetFlag(kAllowUndefinedAsNaN);
     }
   }
@@ -3955,24 +3913,23 @@
 #define H_CONSTANT_DOUBLE(val) \
   HConstant::New(isolate, zone, context, static_cast<double>(val))
 
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                      \
-  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,    \
-                            HValue* left, HValue* right, Strength strength) { \
-    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {   \
-      HConstant* c_left = HConstant::cast(left);                              \
-      HConstant* c_right = HConstant::cast(right);                            \
-      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {          \
-        double double_res = c_left->DoubleValue() op c_right->DoubleValue();  \
-        if (IsInt32Double(double_res)) {                                      \
-          return H_CONSTANT_INT(double_res);                                  \
-        }                                                                     \
-        return H_CONSTANT_DOUBLE(double_res);                                 \
-      }                                                                       \
-    }                                                                         \
-    return new (zone) HInstr(context, left, right, strength);                 \
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                     \
+  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,   \
+                            HValue* left, HValue* right) {                   \
+    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {  \
+      HConstant* c_left = HConstant::cast(left);                             \
+      HConstant* c_right = HConstant::cast(right);                           \
+      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {         \
+        double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
+        if (IsInt32Double(double_res)) {                                     \
+          return H_CONSTANT_INT(double_res);                                 \
+        }                                                                    \
+        return H_CONSTANT_DOUBLE(double_res);                                \
+      }                                                                      \
+    }                                                                        \
+    return new (zone) HInstr(context, left, right);                          \
   }
 
-
 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
 DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
@@ -4198,9 +4155,8 @@
   return new(zone) HMathMinMax(context, left, right, op);
 }
 
-
 HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
-                        HValue* left, HValue* right, Strength strength) {
+                        HValue* left, HValue* right) {
   if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
     HConstant* c_left = HConstant::cast(left);
     HConstant* c_right = HConstant::cast(right);
@@ -4219,12 +4175,11 @@
       }
     }
   }
-  return new (zone) HMod(context, left, right, strength);
+  return new (zone) HMod(context, left, right);
 }
 
-
 HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
-                        HValue* left, HValue* right, Strength strength) {
+                        HValue* left, HValue* right) {
   // If left and right are constant values, try to return a constant value.
   if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
     HConstant* c_left = HConstant::cast(left);
@@ -4243,13 +4198,11 @@
       }
     }
   }
-  return new (zone) HDiv(context, left, right, strength);
+  return new (zone) HDiv(context, left, right);
 }
 
-
 HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
-                            Token::Value op, HValue* left, HValue* right,
-                            Strength strength) {
+                            Token::Value op, HValue* left, HValue* right) {
   if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
     HConstant* c_left = HConstant::cast(left);
     HConstant* c_right = HConstant::cast(right);
@@ -4274,24 +4227,22 @@
       return H_CONSTANT_INT(result);
     }
   }
-  return new (zone) HBitwise(context, op, left, right, strength);
+  return new (zone) HBitwise(context, op, left, right);
 }
 
-
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                            \
-  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,    \
-                            HValue* left, HValue* right, Strength strength) { \
-    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {   \
-      HConstant* c_left = HConstant::cast(left);                              \
-      HConstant* c_right = HConstant::cast(right);                            \
-      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {          \
-        return H_CONSTANT_INT(result);                                        \
-      }                                                                       \
-    }                                                                         \
-    return new (zone) HInstr(context, left, right, strength);                 \
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                          \
+  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,  \
+                            HValue* left, HValue* right) {                  \
+    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+      HConstant* c_left = HConstant::cast(left);                            \
+      HConstant* c_right = HConstant::cast(right);                          \
+      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {        \
+        return H_CONSTANT_INT(result);                                      \
+      }                                                                     \
+    }                                                                       \
+    return new (zone) HInstr(context, left, right);                         \
   }
 
-
 DEFINE_NEW_H_BITWISE_INSTR(HSar,
 c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
 DEFINE_NEW_H_BITWISE_INSTR(HShl,
@@ -4299,9 +4250,8 @@
 
 #undef DEFINE_NEW_H_BITWISE_INSTR
 
-
 HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
-                        HValue* left, HValue* right, Strength strength) {
+                        HValue* left, HValue* right) {
   if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
     HConstant* c_left = HConstant::cast(left);
     HConstant* c_right = HConstant::cast(right);
@@ -4314,7 +4264,7 @@
       return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
     }
   }
-  return new (zone) HShr(context, left, right, strength);
+  return new (zone) HShr(context, left, right);
 }
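[Annotation, not part of the patch.] DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR and its bitwise sibling above fold operations on two numeric constants at graph-construction time (now without the dropped Strength parameter), preferring an int32 constant when the double result is exactly representable. A strength-free standalone sketch of the same folding decision; FitsInt32Sketch only approximates the role of IsInt32Double, and all names here are invented:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <optional>

// An exactly representable, non-negative-zero int32 result can become an
// int32 constant instead of a double constant.
bool FitsInt32Sketch(double v) {
  return v >= INT32_MIN && v <= INT32_MAX && v == std::trunc(v) &&
         !(v == 0.0 && std::signbit(v));
}

// Fold l + r when both operands are known numeric constants; otherwise the
// builder would emit a real instruction node instead.
std::optional<double> FoldAddSketch(std::optional<double> l,
                                    std::optional<double> r) {
  if (!l || !r) return std::nullopt;
  return *l + *r;
}

int main() {
  if (auto folded = FoldAddSketch(2.5, 3.5)) {
    std::printf("folded to %s constant %g\n",
                FitsInt32Sketch(*folded) ? "int32" : "double", *folded);
  }
  return 0;
}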
 
 
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index 13ada8c..22ed052 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -48,7 +48,6 @@
   V(AbnormalExit)                             \
   V(AccessArgumentsAt)                        \
   V(Add)                                      \
-  V(AllocateBlockContext)                     \
   V(Allocate)                                 \
   V(ApplyArguments)                           \
   V(ArgumentsElements)                        \
@@ -64,7 +63,6 @@
   V(CallFunction)                             \
   V(CallNewArray)                             \
   V(CallRuntime)                              \
-  V(CallStub)                                 \
   V(CapturedObject)                           \
   V(Change)                                   \
   V(CheckArrayBufferNotNeutered)              \
@@ -79,7 +77,6 @@
   V(CompareNumericAndBranch)                  \
   V(CompareHoleAndBranch)                     \
   V(CompareGeneric)                           \
-  V(CompareMinusZeroAndBranch)                \
   V(CompareObjectEqAndBranch)                 \
   V(CompareMap)                               \
   V(Constant)                                 \
@@ -117,7 +114,6 @@
   V(LoadNamedField)                           \
   V(LoadNamedGeneric)                         \
   V(LoadRoot)                                 \
-  V(MapEnumLength)                            \
   V(MathFloorOfDiv)                           \
   V(MathMinMax)                               \
   V(MaybeGrowElements)                        \
@@ -2402,7 +2398,9 @@
   HValue* context() const { return first(); }
   HValue* function() const { return second(); }
 
-  ConvertReceiverMode convert_mode() const { return convert_mode_; }
+  ConvertReceiverMode convert_mode() const {
+    return ConvertReceiverModeField::decode(bit_field_);
+  }
   FeedbackVectorSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
@@ -2424,10 +2422,14 @@
   HCallFunction(HValue* context, HValue* function, int argument_count,
                 ConvertReceiverMode convert_mode)
       : HBinaryCall(context, function, argument_count),
-        convert_mode_(convert_mode) {}
+        bit_field_(ConvertReceiverModeField::encode(convert_mode)) {}
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
-  ConvertReceiverMode convert_mode_;
+
+  class ConvertReceiverModeField : public BitField<ConvertReceiverMode, 0, 2> {
+  };
+
+  uint32_t bit_field_;
 };
 
 
@@ -2493,31 +2495,6 @@
 };
 
 
-class HMapEnumLength final : public HUnaryOperation {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
-
- protected:
-  bool DataEquals(HValue* other) override { return true; }
-
- private:
-  explicit HMapEnumLength(HValue* value)
-      : HUnaryOperation(value, HType::Smi()) {
-    set_representation(Representation::Smi());
-    SetFlag(kUseGVN);
-    SetDependsOnFlag(kMaps);
-  }
-
-  bool IsDeletable() const override { return true; }
-};
-
-
 class HUnaryMathOperation final : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -2810,8 +2787,6 @@
     bool in_new_space = isolate->heap()->InNewSpace(*func);
     // NOTE: We create an uninitialized Unique and initialize it later.
     // This is because a JSFunction can move due to GC during graph creation.
-    // TODO(titzer): This is a migration crutch. Replace with some kind of
-    // Uniqueness scope later.
     Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
     HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
     return check;
@@ -3341,7 +3316,6 @@
   Representation representation_from_non_phi_uses_ = Representation::None();
   bool has_type_feedback_from_uses_ = false;
 
-  // TODO(titzer): we can't eliminate the receiver for generating backtraces
   bool IsDeletable() const override { return !IsReceiver(); }
 };
 
@@ -3756,9 +3730,8 @@
 class HBinaryOperation : public HTemplateInstruction<3> {
  public:
   HBinaryOperation(HValue* context, HValue* left, HValue* right,
-                   Strength strength, HType type = HType::Tagged())
+                   HType type = HType::Tagged())
       : HTemplateInstruction<3>(type),
-        strength_(strength),
         observed_output_representation_(Representation::None()) {
     DCHECK(left != NULL && right != NULL);
     SetOperandAt(0, context);
@@ -3771,7 +3744,6 @@
   HValue* context() const { return OperandAt(0); }
   HValue* left() const { return OperandAt(1); }
   HValue* right() const { return OperandAt(2); }
-  Strength strength() const { return strength_; }
 
   // True if switching left and right operands likely generates better code.
   bool AreOperandsBetterSwitched() {
@@ -3847,13 +3819,10 @@
     return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
   }
 
-  Strength strength() { return strength_; }
-
   DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
 
  private:
   bool IgnoreObservedOutputRepresentation(Representation current_rep);
-  Strength strength_;
 
   Representation observed_input_representation_[2];
   Representation observed_output_representation_;
@@ -4123,11 +4092,10 @@
 class HBitwiseBinaryOperation : public HBinaryOperation {
  public:
   HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
-                          Strength strength, HType type = HType::TaggedNumber())
-      : HBinaryOperation(context, left, right, strength, type) {
+                          HType type = HType::TaggedNumber())
+      : HBinaryOperation(context, left, right, type) {
     SetFlag(kFlexibleRepresentation);
     SetFlag(kTruncatingToInt32);
-    if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
     SetAllSideEffects();
   }
 
@@ -4182,7 +4150,7 @@
 
  private:
   HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
-      : HBinaryOperation(context, left, right, Strength::WEAK) {
+      : HBinaryOperation(context, left, right) {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
     SetFlag(kCanOverflow);
@@ -4201,13 +4169,11 @@
 
 class HArithmeticBinaryOperation : public HBinaryOperation {
  public:
-  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
-                             Strength strength)
-      : HBinaryOperation(context, left, right, strength,
-                         HType::TaggedNumber()) {
+  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right, HType::TaggedNumber()) {
     SetAllSideEffects();
     SetFlag(kFlexibleRepresentation);
-    if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kAllowUndefinedAsNaN);
   }
 
   void RepresentationChanged(Representation to) override {
@@ -4232,9 +4198,8 @@
 class HCompareGeneric final : public HBinaryOperation {
  public:
   static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context,
-                              HValue* left, HValue* right, Token::Value token,
-                              Strength strength = Strength::WEAK) {
-    return new (zone) HCompareGeneric(context, left, right, token, strength);
+                              HValue* left, HValue* right, Token::Value token) {
+    return new (zone) HCompareGeneric(context, left, right, token);
   }
 
   Representation RequiredInputRepresentation(int index) override {
@@ -4250,8 +4215,8 @@
 
  private:
   HCompareGeneric(HValue* context, HValue* left, HValue* right,
-                  Token::Value token, Strength strength)
-      : HBinaryOperation(context, left, right, strength, HType::Boolean()),
+                  Token::Value token)
+      : HBinaryOperation(context, left, right, HType::Boolean()),
         token_(token) {
     DCHECK(Token::IsCompareOp(token));
     set_representation(Representation::Tagged());
@@ -4268,17 +4233,9 @@
                                        HValue* context, HValue* left,
                                        HValue* right, Token::Value token,
                                        HBasicBlock* true_target = NULL,
-                                       HBasicBlock* false_target = NULL,
-                                       Strength strength = Strength::WEAK) {
-    return new (zone) HCompareNumericAndBranch(left, right, token, true_target,
-                                               false_target, strength);
-  }
-  static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
-                                       HValue* context, HValue* left,
-                                       HValue* right, Token::Value token,
-                                       Strength strength) {
+                                       HBasicBlock* false_target = NULL) {
     return new (zone)
-        HCompareNumericAndBranch(left, right, token, NULL, NULL, strength);
+        HCompareNumericAndBranch(left, right, token, true_target, false_target);
   }
 
   HValue* left() const { return OperandAt(0); }
@@ -4302,8 +4259,6 @@
 
   bool KnownSuccessorBlock(HBasicBlock** block) override;
 
-  Strength strength() const { return strength_; }
-
   std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
 
   void SetOperandPositions(Zone* zone, SourcePosition left_pos,
@@ -4316,9 +4271,8 @@
 
  private:
   HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token,
-                           HBasicBlock* true_target, HBasicBlock* false_target,
-                           Strength strength)
-      : token_(token), strength_(strength) {
+                           HBasicBlock* true_target, HBasicBlock* false_target)
+      : token_(token) {
     SetFlag(kFlexibleRepresentation);
     DCHECK(Token::IsCompareOp(token));
     SetOperandAt(0, left);
@@ -4329,7 +4283,6 @@
 
   Representation observed_input_representation_[2];
   Token::Value token_;
-  Strength strength_;
 };
 
 
@@ -4358,27 +4311,6 @@
 };
 
 
-class HCompareMinusZeroAndBranch final : public HUnaryControlInstruction {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
-
-  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
-  Representation RequiredInputRepresentation(int index) override {
-    return representation();
-  }
-
-  bool KnownSuccessorBlock(HBasicBlock** block) override;
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
-
- private:
-  explicit HCompareMinusZeroAndBranch(HValue* value)
-      : HUnaryControlInstruction(value, NULL, NULL) {
-  }
-};
-
-
 class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
@@ -4682,8 +4614,7 @@
 
  private:
   HInstanceOf(HValue* context, HValue* left, HValue* right)
-      : HBinaryOperation(context, left, right, Strength::WEAK,
-                         HType::Boolean()) {
+      : HBinaryOperation(context, left, right, HType::Boolean()) {
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -4766,10 +4697,9 @@
 class HAdd final : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right, Strength strength,
+                           HValue* left, HValue* right,
                            ExternalAddType external_add_type);
 
   // Add is only commutative if two integer values are added and not if two
@@ -4831,9 +4761,9 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+  HAdd(HValue* context, HValue* left, HValue* right,
        ExternalAddType external_add_type = NoExternalAdd)
-      : HArithmeticBinaryOperation(context, left, right, strength),
+      : HArithmeticBinaryOperation(context, left, right),
         external_add_type_(external_add_type) {
     SetFlag(kCanOverflow);
     switch (external_add_type_) {
@@ -4868,8 +4798,7 @@
 class HSub final : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   HValue* Canonicalize() override;
 
@@ -4890,8 +4819,8 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HSub(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HArithmeticBinaryOperation(context, left, right, strength) {
+  HSub(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanOverflow);
   }
 };
@@ -4900,14 +4829,11 @@
 class HMul final : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
-                               HValue* left, HValue* right,
-                               Strength strength = Strength::WEAK) {
-    HInstruction* instr =
-        HMul::New(isolate, zone, context, left, right, strength);
+                               HValue* left, HValue* right) {
+    HInstruction* instr = HMul::New(isolate, zone, context, left, right);
     if (!instr->IsMul()) return instr;
     HMul* mul = HMul::cast(instr);
     // TODO(mstarzinger): Prevent bailout on minus zero for imul.
@@ -4937,8 +4863,8 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HMul(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HArithmeticBinaryOperation(context, left, right, strength) {
+  HMul(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanOverflow);
   }
 };
@@ -4947,8 +4873,7 @@
 class HMod final : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   HValue* Canonicalize() override;
 
@@ -4967,8 +4892,8 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HMod(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HArithmeticBinaryOperation(context, left, right, strength) {
+  HMod(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanBeDivByZero);
     SetFlag(kCanOverflow);
     SetFlag(kLeftCanBeNegative);
@@ -4979,8 +4904,7 @@
 class HDiv final : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   HValue* Canonicalize() override;
 
@@ -4999,8 +4923,8 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HDiv(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HArithmeticBinaryOperation(context, left, right, strength) {
+  HDiv(HValue* context, HValue* left, HValue* right)
+      : HArithmeticBinaryOperation(context, left, right) {
     SetFlag(kCanBeDivByZero);
     SetFlag(kCanOverflow);
   }
@@ -5046,8 +4970,7 @@
 
  private:
   HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
-      : HArithmeticBinaryOperation(context, left, right, Strength::WEAK),
-        operation_(op) {}
+      : HArithmeticBinaryOperation(context, left, right), operation_(op) {}
 
   Operation operation_;
 };
@@ -5056,8 +4979,7 @@
 class HBitwise final : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           Token::Value op, HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           Token::Value op, HValue* left, HValue* right);
 
   Token::Value op() const { return op_; }
 
@@ -5077,9 +4999,8 @@
   Range* InferRange(Zone* zone) override;
 
  private:
-  HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right,
-           Strength strength)
-      : HBitwiseBinaryOperation(context, left, right, strength), op_(op) {
+  HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right), op_(op) {
     DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
     // BIT_AND with a smi-range positive value will always unset the
     // entire sign-extension of the smi-sign.
@@ -5113,8 +5034,7 @@
 class HShl final : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   Range* InferRange(Zone* zone) override;
 
@@ -5135,16 +5055,15 @@
   bool DataEquals(HValue* other) override { return true; }
 
  private:
-  HShl(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HBitwiseBinaryOperation(context, left, right, strength) {}
+  HShl(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) {}
 };
 
 
 class HShr final : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   bool TryDecompose(DecompositionResult* decomposition) override {
     if (right()->IsInteger32Constant()) {
@@ -5173,16 +5092,15 @@
   bool DataEquals(HValue* other) override { return true; }
 
  private:
-  HShr(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HBitwiseBinaryOperation(context, left, right, strength) {}
+  HShr(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) {}
 };
 
 
 class HSar final : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK);
+                           HValue* left, HValue* right);
 
   bool TryDecompose(DecompositionResult* decomposition) override {
     if (right()->IsInteger32Constant()) {
@@ -5211,17 +5129,16 @@
   bool DataEquals(HValue* other) override { return true; }
 
  private:
-  HSar(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HBitwiseBinaryOperation(context, left, right, strength) {}
+  HSar(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) {}
 };
 
 
 class HRor final : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
-                           HValue* left, HValue* right,
-                           Strength strength = Strength::WEAK) {
-    return new (zone) HRor(context, left, right, strength);
+                           HValue* left, HValue* right) {
+    return new (zone) HRor(context, left, right);
   }
 
   void UpdateRepresentation(Representation new_rep,
@@ -5237,8 +5154,8 @@
   bool DataEquals(HValue* other) override { return true; }
 
  private:
-  HRor(HValue* context, HValue* left, HValue* right, Strength strength)
-      : HBitwiseBinaryOperation(context, left, right, strength) {
+  HRor(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) {
     ChangeRepresentation(Representation::Integer32());
   }
 };
@@ -5316,27 +5233,6 @@
 };
 
 
-class HCallStub final : public HUnaryCall {
- public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
-  CodeStub::Major major_key() { return major_key_; }
-
-  HValue* context() { return value(); }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub)
-
- private:
-  HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
-      : HUnaryCall(context, argument_count),
-        major_key_(major_key) {
-  }
-
-  CodeStub::Major major_key_;
-};
-
-
 class HUnknownOSRValue final : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
@@ -6027,6 +5923,11 @@
                          Representation::UInteger8());
   }
 
+  static HObjectAccess ForMapBitField3() {
+    return HObjectAccess(kInobject, Map::kBitField3Offset,
+                         Representation::Integer32());
+  }
+
   static HObjectAccess ForNameHashField() {
     return HObjectAccess(kInobject,
                          Name::kHashFieldOffset,
@@ -6399,9 +6300,8 @@
 
 class HLoadNamedGeneric final : public HTemplateInstruction<2> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
-                                              Handle<Name>, LanguageMode,
-                                              InlineCacheState);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
+                                              Handle<Name>, InlineCacheState);
 
   HValue* context() const { return OperandAt(0); }
   HValue* object() const { return OperandAt(1); }
@@ -6429,14 +6329,10 @@
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
 
-  LanguageMode language_mode() const { return language_mode_; }
-
  private:
   HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
-                    LanguageMode language_mode,
                     InlineCacheState initialization_state)
       : name_(name),
-        language_mode_(language_mode),
         initialization_state_(initialization_state) {
     SetOperandAt(0, context);
     SetOperandAt(1, object);
@@ -6447,7 +6343,6 @@
   Handle<Name> name_;
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
-  LanguageMode language_mode_;
   InlineCacheState initialization_state_;
 };
 
@@ -6690,9 +6585,8 @@
 
 class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
-                                              HValue*, LanguageMode,
-                                              InlineCacheState);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
+                                              HValue*, InlineCacheState);
   HValue* object() const { return OperandAt(0); }
   HValue* key() const { return OperandAt(1); }
   HValue* context() const { return OperandAt(2); }
@@ -6724,14 +6618,10 @@
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 
-  LanguageMode language_mode() const { return language_mode_; }
-
  private:
   HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
-                    LanguageMode language_mode,
                     InlineCacheState initialization_state)
-      : initialization_state_(initialization_state),
-        language_mode_(language_mode) {
+      : initialization_state_(initialization_state) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
@@ -6742,7 +6632,6 @@
   Handle<TypeFeedbackVector> feedback_vector_;
   FeedbackVectorSlot slot_;
   InlineCacheState initialization_state_;
-  LanguageMode language_mode_;
 };
 
 
@@ -7298,7 +7187,7 @@
   HStringAdd(HValue* context, HValue* left, HValue* right,
              PretenureFlag pretenure_flag, StringAddFlags flags,
              Handle<AllocationSite> allocation_site)
-      : HBinaryOperation(context, left, right, Strength::WEAK, HType::String()),
+      : HBinaryOperation(context, left, right, HType::String()),
         flags_(flags),
         pretenure_flag_(pretenure_flag) {
     set_representation(Representation::Tagged());
@@ -7778,36 +7667,6 @@
 };
 
 
-class HAllocateBlockContext: public HTemplateInstruction<2> {
- public:
-  DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
-                                 HValue*, Handle<ScopeInfo>);
-  HValue* context() const { return OperandAt(0); }
-  HValue* function() const { return OperandAt(1); }
-  Handle<ScopeInfo> scope_info() const { return scope_info_; }
-
-  Representation RequiredInputRepresentation(int index) override {
-    return Representation::Tagged();
-  }
-
-  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
-
- private:
-  HAllocateBlockContext(HValue* context,
-                        HValue* function,
-                        Handle<ScopeInfo> scope_info)
-      : scope_info_(scope_info) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, function);
-    set_representation(Representation::Tagged());
-  }
-
-  Handle<ScopeInfo> scope_info_;
-};
-
-
 
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
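
Note on the hunks above: most of the churn in hydrogen-instructions.h is one mechanical refactoring, deleting the strong-mode Strength operand from HBinaryOperation and from every arithmetic and bitwise factory. A minimal standalone sketch of the pattern, using hypothetical stand-in classes rather than the real Hydrogen hierarchy:

    #include <cstdio>

    struct HValueStub {};  // hypothetical stand-in for HValue*

    struct HInstructionStub {
      // Before the change every factory carried a defaulted strong-mode
      // parameter:
      //   static HInstructionStub* New(HValueStub*, HValueStub*,
      //                                Strength strength = Strength::WEAK);
      // After it, the parameter is gone: callers that relied on the default
      // compile unchanged, while any leftover explicit Strength::STRONG call
      // site fails to compile and gets flushed out by the build.
      static HInstructionStub* New(HValueStub* left, HValueStub* right) {
        static HInstructionStub instr;
        (void)left;
        (void)right;
        return &instr;
      }
    };

    int main() {
      HValueStub a, b;
      HInstructionStub::New(&a, &b);  // default-argument callers need no edit
      std::puts("factory signature slimmed without touching callers");
    }
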
diff --git a/src/crankshaft/hydrogen-load-elimination.cc b/src/crankshaft/hydrogen-load-elimination.cc
index da8d186..88963fc 100644
--- a/src/crankshaft/hydrogen-load-elimination.cc
+++ b/src/crankshaft/hydrogen-load-elimination.cc
@@ -243,7 +243,6 @@
     if (instr->has_transition()) {
       // A transition introduces a new field and alters the map of the object.
       // Since the field in the object is new, it cannot alias existing entries.
-      // TODO(titzer): introduce a constant for the new map and remember it.
       KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
     } else {
       // Kill non-equivalent may-alias entries.
@@ -402,7 +401,6 @@
   // Compute the field index for the given in-object offset; -1 if not tracked.
   int FieldOf(int offset) {
     if (offset >= kMaxTrackedFields * kPointerSize) return -1;
-    // TODO(titzer): track misaligned loads in a separate list?
     if ((offset % kPointerSize) != 0) return -1;  // Ignore misaligned accesses.
     return offset / kPointerSize;
   }
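
The load-elimination hunks only drop stale TODOs; the surviving FieldOf helper is the entire offset-to-slot mapping the pass uses. A self-contained sketch with assumed constants (kPointerSize is 8 here for illustration; the real value is target-dependent):

    #include <cassert>

    constexpr int kPointerSize = 8;
    constexpr int kMaxTrackedFields = 16;

    // Returns the tracked field index for an in-object byte offset, or -1
    // if the offset is out of range or not pointer-aligned.
    int FieldOf(int offset) {
      if (offset >= kMaxTrackedFields * kPointerSize) return -1;
      if ((offset % kPointerSize) != 0) return -1;  // ignore misaligned accesses
      return offset / kPointerSize;
    }

    int main() {
      assert(FieldOf(0) == 0);
      assert(FieldOf(24) == 3);
      assert(FieldOf(25) == -1);       // misaligned
      assert(FieldOf(8 * 100) == -1);  // beyond the tracked range
    }
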
diff --git a/src/crankshaft/hydrogen-range-analysis.cc b/src/crankshaft/hydrogen-range-analysis.cc
index f5eba5e..a489e01 100644
--- a/src/crankshaft/hydrogen-range-analysis.cc
+++ b/src/crankshaft/hydrogen-range-analysis.cc
@@ -71,12 +71,6 @@
                 instr->to().IsSmiOrInteger32());
           PropagateMinusZeroChecks(instr->value());
         }
-      } else if (value->IsCompareMinusZeroAndBranch()) {
-        HCompareMinusZeroAndBranch* instr =
-            HCompareMinusZeroAndBranch::cast(value);
-        if (instr->value()->representation().IsSmiOrInteger32()) {
-          PropagateMinusZeroChecks(instr->value());
-        }
       }
     }
 
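The range-analysis hunk is pure dead-code removal: with HCompareMinusZeroAndBranch gone from the instruction set, its minus-zero propagation case goes too. The invariant behind it is that -0.0 exists only in the double representation, so a value proven SmiOrInteger32 never needs the check. A small standalone demonstration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double minus_zero = -0.0;
      // -0.0 compares equal to 0.0, but its sign is observable through
      // division, which is why double-represented values need the check.
      std::printf("signbit(-0.0) = %d\n", std::signbit(minus_zero));  // 1
      std::printf("-0.0 == 0.0   = %d\n", minus_zero == 0.0);         // 1
      std::printf("1.0 / -0.0    = %f\n", 1.0 / minus_zero);          // -inf
      // Integers have no negative zero, so Smi/Int32 values can skip it.
      int int_zero = -0;
      std::printf("int -0 == 0   = %d\n", int_zero == 0);             // 1
    }
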
diff --git a/src/crankshaft/hydrogen-store-elimination.cc b/src/crankshaft/hydrogen-store-elimination.cc
index ba32c8a..57c7880 100644
--- a/src/crankshaft/hydrogen-store-elimination.cc
+++ b/src/crankshaft/hydrogen-store-elimination.cc
@@ -36,7 +36,6 @@
       HInstruction* instr = it.Current();
       if (instr->CheckFlag(HValue::kIsDead)) continue;
 
-      // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
       switch (instr->opcode()) {
         case HValue::kStoreNamedField:
           // Remove any unobserved stores overwritten by this store.
@@ -68,7 +67,6 @@
              prev->id(), store->id()));
       unobserved_.Remove(i);
     } else {
-      // TODO(titzer): remove map word clearing from folded allocations.
       i++;
     }
   }
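
For context on the store-elimination TODO removals above: the pass deletes a StoreNamedField when a later store overwrites the same field before anything can observe it. A toy model of that rule, under assumed simplifications (integer field keys, and a single coarse "observe" event standing in for loads, calls, and deopt points):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Op {
      enum Kind { kStore, kObserve } kind;
      int field;
      bool dead = false;
    };

    // A store is dead if a later store hits the same field before any
    // observation. Walk backwards, remembering overwritten fields.
    void EliminateUnobservedStores(std::vector<Op>& ops) {
      std::vector<bool> overwritten(16, false);
      for (auto it = ops.rbegin(); it != ops.rend(); ++it) {
        if (it->kind == Op::kObserve) {
          std::fill(overwritten.begin(), overwritten.end(), false);
        } else {
          if (overwritten[it->field]) it->dead = true;
          overwritten[it->field] = true;
        }
      }
    }

    int main() {
      std::vector<Op> ops = {{Op::kStore, 1}, {Op::kStore, 1},  // first is dead
                             {Op::kStore, 2}, {Op::kObserve, 0},
                             {Op::kStore, 2}};  // earlier store to 2 survives
      EliminateUnobservedStores(ops);
      for (const Op& op : ops)
        if (op.kind == Op::kStore)
          std::printf("store field %d: %s\n", op.field,
                      op.dead ? "dead" : "live");
    }
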
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
index 9c5e341..8c85625 100644
--- a/src/crankshaft/hydrogen-types.cc
+++ b/src/crankshaft/hydrogen-types.cc
@@ -4,39 +4,33 @@
 
 #include "src/crankshaft/hydrogen-types.h"
 
+#include "src/field-type.h"
+#include "src/handles-inl.h"
 #include "src/ostreams.h"
-#include "src/types-inl.h"
-
 
 namespace v8 {
 namespace internal {
 
 // static
-template <class T>
-HType HType::FromType(typename T::TypeHandle type) {
-  if (T::Any()->Is(type)) return HType::Any();
+HType HType::FromType(Type* type) {
+  if (Type::Any()->Is(type)) return HType::Any();
   if (!type->IsInhabited()) return HType::None();
-  if (type->Is(T::SignedSmall())) return HType::Smi();
-  if (type->Is(T::Number())) return HType::TaggedNumber();
-  if (type->Is(T::Null())) return HType::Null();
-  if (type->Is(T::String())) return HType::String();
-  if (type->Is(T::Boolean())) return HType::Boolean();
-  if (type->Is(T::Undefined())) return HType::Undefined();
-  if (type->Is(T::Object())) return HType::JSObject();
-  if (type->Is(T::Receiver())) return HType::JSReceiver();
+  if (type->Is(Type::SignedSmall())) return HType::Smi();
+  if (type->Is(Type::Number())) return HType::TaggedNumber();
+  if (type->Is(Type::Null())) return HType::Null();
+  if (type->Is(Type::String())) return HType::String();
+  if (type->Is(Type::Boolean())) return HType::Boolean();
+  if (type->Is(Type::Undefined())) return HType::Undefined();
+  if (type->Is(Type::Object())) return HType::JSObject();
+  if (type->Is(Type::Receiver())) return HType::JSReceiver();
   return HType::Tagged();
 }
 
 
 // static
-template
-HType HType::FromType<Type>(Type* type);
-
-
-// static
-template
-HType HType::FromType<HeapType>(Handle<HeapType> type);
-
+HType HType::FromFieldType(Handle<FieldType> type, Zone* temp_zone) {
+  return FromType(type->Convert(temp_zone));
+}
 
 // static
 HType HType::FromValue(Handle<Object> value) {
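
The hydrogen-types.cc change collapses the FromType<Type>/FromType<HeapType> template pair into a single concrete FromType(Type*), plus a FromFieldType wrapper that converts the field type in a temporary zone. The dispatch itself is a most-specific-first walk down a subtype lattice; a toy bitset model of why the check order matters:

    #include <cassert>
    #include <cstdint>

    // Toy lattice mirroring the subtype checks in HType::FromType above.
    // Bitsets are illustrative; V8's Type uses a richer representation.
    using Bits = uint32_t;
    constexpr Bits kSignedSmall = 1 << 0;
    constexpr Bits kOtherNumber = 1 << 1;
    constexpr Bits kNumber = kSignedSmall | kOtherNumber;
    constexpr Bits kString = 1 << 2;

    // type Is super  <=>  every value of type is also a value of super.
    bool Is(Bits type, Bits super) { return (type & ~super) == 0; }

    enum class HT { kSmi, kTaggedNumber, kString, kTagged };

    HT FromType(Bits type) {
      // Most specific first: a SignedSmall type also satisfies Is(Number),
      // so swapping the first two checks would widen Smi to TaggedNumber.
      if (Is(type, kSignedSmall)) return HT::kSmi;
      if (Is(type, kNumber)) return HT::kTaggedNumber;
      if (Is(type, kString)) return HT::kString;
      return HT::kTagged;
    }

    int main() {
      assert(FromType(kSignedSmall) == HT::kSmi);
      assert(FromType(kNumber) == HT::kTaggedNumber);
      // A union of unrelated types falls through to the generic bucket.
      assert(FromType(kString | kNumber) == HT::kTagged);
    }
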
diff --git a/src/crankshaft/hydrogen-types.h b/src/crankshaft/hydrogen-types.h
index 87148ee..0690ece 100644
--- a/src/crankshaft/hydrogen-types.h
+++ b/src/crankshaft/hydrogen-types.h
@@ -9,12 +9,14 @@
 #include <iosfwd>
 
 #include "src/base/macros.h"
+#include "src/types.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 template <typename T> class Handle;
+class FieldType;
 class Object;
 
 #define HTYPE_LIST(V)                               \
@@ -62,8 +64,9 @@
   HTYPE_LIST(DECLARE_IS_TYPE)
   #undef DECLARE_IS_TYPE
 
-  template <class T>
-  static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
+  static HType FromType(Type* type) WARN_UNUSED_RESULT;
+  static HType FromFieldType(Handle<FieldType> type,
+                             Zone* temp_zone) WARN_UNUSED_RESULT;
   static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
 
   friend std::ostream& operator<<(std::ostream& os, const HType& t);
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index 98337be..b6fdd3a 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -34,6 +34,7 @@
 #include "src/crankshaft/hydrogen-uint32-analysis.h"
 #include "src/crankshaft/lithium-allocator.h"
 #include "src/crankshaft/typing.h"
+#include "src/field-type.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
@@ -687,33 +688,28 @@
   return value ? GetConstantTrue() : GetConstantFalse();
 }
 
+#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value,         \
+                            undetectable)                                   \
+  HConstant* HGraph::GetConstant##Name() {                                  \
+    if (!constant_##name##_.is_set()) {                                     \
+      HConstant* constant = new (zone()) HConstant(                         \
+          Unique<Object>::CreateImmovable(                                  \
+              isolate()->factory()->name##_value()),                        \
+          Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
+          false, Representation::Tagged(), htype, true, boolean_value,      \
+          undetectable, ODDBALL_TYPE);                                      \
+      constant->InsertAfter(entry_block()->first());                        \
+      constant_##name##_.set(constant);                                     \
+    }                                                                       \
+    return ReinsertConstantIfNecessary(constant_##name##_.get());           \
+  }
 
-#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value)            \
-HConstant* HGraph::GetConstant##Name() {                                       \
-  if (!constant_##name##_.is_set()) {                                          \
-    HConstant* constant = new(zone()) HConstant(                               \
-        Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
-        Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()),      \
-        false,                                                                 \
-        Representation::Tagged(),                                              \
-        htype,                                                                 \
-        true,                                                                  \
-        boolean_value,                                                         \
-        false,                                                                 \
-        ODDBALL_TYPE);                                                         \
-    constant->InsertAfter(entry_block()->first());                             \
-    constant_##name##_.set(constant);                                          \
-  }                                                                            \
-  return ReinsertConstantIfNecessary(constant_##name##_.get());                \
-}
-
-
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false)
-DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true)
-DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false)
-DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false)
-
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false,
+                    true)
+DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true, false)
+DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false, false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false, false)
+DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false, true)
 
 #undef DEFINE_GET_CONSTANT
 
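DEFINE_GET_CONSTANT grows a sixth argument, and only the undefined and null constants pass true for it. My reading, not stated in the diff: those two values are mutually equal under JavaScript's abstract equality, so marking both oddballs undetectable lets generated code test a single bit instead of comparing against two constants. A toy model of that equivalence:

    #include <cassert>

    // Hypothetical helpers, not V8 code.
    enum Oddball { kUndefined, kNull, kTrue, kFalse, kTheHole };

    bool IsUndetectable(Oddball o) { return o == kUndefined || o == kNull; }

    // x == null under JavaScript abstract equality: true exactly for the
    // two undetectable oddballs, so one bit test covers both.
    bool LooseEqualsNull(Oddball x) { return IsUndetectable(x); }

    int main() {
      assert(LooseEqualsNull(kUndefined));
      assert(LooseEqualsNull(kNull));
      assert(!LooseEqualsNull(kFalse));  // false == null is false in JS
    }
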
@@ -1185,7 +1181,7 @@
 
 
 HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new(zone()) HGraph(info_);
+  graph_ = new (zone()) HGraph(info_, descriptor_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
   CompilationPhase phase("H_Block building", info_);
   set_current_block(graph()->entry_block());
@@ -1275,6 +1271,14 @@
 }
 
 
+HValue* HGraphBuilder::BuildEnumLength(HValue* map) {
+  NoObservableSideEffectsScope scope(this);
+  HValue* bit_field3 =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+  return BuildDecodeField<Map::EnumLengthBits>(bit_field3);
+}
+
+
 HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
   if (obj->type().IsHeapObject()) return obj;
   return Add<HCheckHeapObject>(obj);
@@ -1671,10 +1675,10 @@
   return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
 }
 
-
-HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
-    HValue* receiver, HValue* elements, HValue* key, HValue* hash,
-    LanguageMode language_mode) {
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
+                                                           HValue* elements,
+                                                           HValue* key,
+                                                           HValue* hash) {
   HValue* capacity =
       Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
                       nullptr, nullptr, FAST_ELEMENTS);
@@ -1717,11 +1721,8 @@
     // element == undefined means "not found". Call the runtime.
     // TODO(jkummerow): walk the prototype chain instead.
     Add<HPushArguments>(receiver, key);
-    Push(Add<HCallRuntime>(
-        Runtime::FunctionForId(is_strong(language_mode)
-                                   ? Runtime::kKeyedGetPropertyStrong
-                                   : Runtime::kKeyedGetProperty),
-        2));
+    Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+                           2));
   }
   if_undefined.Else();
   {
@@ -1780,11 +1781,8 @@
                          FAST_ELEMENTS));
     details_compare.Else();
     Add<HPushArguments>(receiver, key);
-    Push(Add<HCallRuntime>(
-        Runtime::FunctionForId(is_strong(language_mode)
-                                   ? Runtime::kKeyedGetPropertyStrong
-                                   : Runtime::kKeyedGetProperty),
-        2));
+    Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+                           2));
     details_compare.End();
 
     found_key_match.Else();
@@ -1819,7 +1817,7 @@
   // Allocate the JSIteratorResult object.
   HValue* result =
       Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
-                     NOT_TENURED, JS_ITERATOR_RESULT_TYPE);
+                     NOT_TENURED, JS_OBJECT_TYPE);
 
   // Initialize the JSIteratorResult object.
   HValue* native_context = BuildGetNativeContext();
@@ -2052,6 +2050,20 @@
   return Pop();
 }
 
+HValue* HGraphBuilder::BuildToNumber(HValue* input) {
+  if (input->type().IsTaggedNumber()) {
+    return input;
+  }
+  Callable callable = CodeFactory::ToNumber(isolate());
+  HValue* stub = Add<HConstant>(callable.code());
+  HValue* values[] = {context(), input};
+  HCallWithDescriptor* instr =
+      Add<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                               Vector<HValue*>(values, arraysize(values)));
+  instr->set_type(HType::TaggedNumber());
+  return instr;
+}
+
 
 HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
   NoObservableSideEffectsScope scope(this);
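
The new BuildToNumber helper is a type-driven fast path: an input already typed TaggedNumber is returned as-is, and only otherwise is the ToNumber stub called, with the result retyped to TaggedNumber. A stand-in sketch of the shape (names hypothetical, not V8 API):

    #include <cstdio>

    struct Value {
      bool is_tagged_number;
      double payload;
    };

    Value CallToNumberStub(Value v) {  // stands in for the CodeFactory stub call
      return {true, v.payload};
    }

    Value BuildToNumber(Value input) {
      if (input.is_tagged_number) return input;  // fast path: no call emitted
      Value result = CallToNumberStub(input);
      result.is_tagged_number = true;  // instr->set_type(HType::TaggedNumber())
      return result;
    }

    int main() {
      Value num{true, 4.0};
      std::printf("fast path keeps value: %g\n", BuildToNumber(num).payload);
    }
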
@@ -2578,8 +2590,8 @@
         HObjectAccess::ForFixedTypedArrayBaseExternalPointer());
     HValue* base_pointer = Add<HLoadNamedField>(
         elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer());
-    HValue* backing_store = AddUncasted<HAdd>(
-        external_pointer, base_pointer, Strength::WEAK, AddOfExternalAndTagged);
+    HValue* backing_store = AddUncasted<HAdd>(external_pointer, base_pointer,
+                                              AddOfExternalAndTagged);
 
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       NoObservableSideEffectsScope no_effects(this);
@@ -3171,37 +3183,24 @@
                                     HIfContinuation* continuation,
                                     MapEmbedding map_embedding) {
   IfBuilder if_nil(this);
-  bool some_case_handled = false;
-  bool some_case_missing = false;
-
-  if (type->Maybe(Type::Null())) {
-    if (some_case_handled) if_nil.Or();
-    if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
-    some_case_handled = true;
-  } else {
-    some_case_missing = true;
-  }
-
-  if (type->Maybe(Type::Undefined())) {
-    if (some_case_handled) if_nil.Or();
-    if_nil.If<HCompareObjectEqAndBranch>(value,
-                                         graph()->GetConstantUndefined());
-    some_case_handled = true;
-  } else {
-    some_case_missing = true;
-  }
 
   if (type->Maybe(Type::Undetectable())) {
-    if (some_case_handled) if_nil.Or();
     if_nil.If<HIsUndetectableAndBranch>(value);
-    some_case_handled = true;
   } else {
-    some_case_missing = true;
-  }
+    bool maybe_null = type->Maybe(Type::Null());
+    if (maybe_null) {
+      if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
+    }
 
-  if (some_case_missing) {
+    if (type->Maybe(Type::Undefined())) {
+      if (maybe_null) if_nil.Or();
+      if_nil.If<HCompareObjectEqAndBranch>(value,
+                                           graph()->GetConstantUndefined());
+    }
+
     if_nil.Then();
     if_nil.Else();
+
     if (type->NumClasses() == 1) {
       BuildCheckHeapObject(value);
       // For ICs, the map checked below is a sentinel map that gets replaced by
@@ -3547,14 +3546,14 @@
 
 
 HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
-    : HGraphBuilder(info),
+    : HGraphBuilder(info, CallInterfaceDescriptor()),
       function_state_(NULL),
       initial_function_state_(this, info, NORMAL_RETURN, 0),
       ast_context_(NULL),
       break_scope_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
-      osr_(new(info->zone()) HOsrBuilder(this)) {
+      osr_(new (info->zone()) HOsrBuilder(this)) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
@@ -3641,7 +3640,7 @@
 }
 
 
-HGraph::HGraph(CompilationInfo* info)
+HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
     : isolate_(info->isolate()),
       next_block_id_(0),
       entry_block_(NULL),
@@ -3651,6 +3650,7 @@
       uint32_instructions_(NULL),
       osr_(NULL),
       info_(info),
+      descriptor_(descriptor),
       zone_(info->zone()),
       is_recursive_(false),
       use_optimistic_licm_(false),
@@ -3660,10 +3660,9 @@
       no_side_effects_scope_count_(0),
       disallow_adding_new_values_(false) {
   if (info->IsStub()) {
-    CallInterfaceDescriptor descriptor =
-        info->code_stub()->GetCallInterfaceDescriptor();
-    start_environment_ =
-        new (zone_) HEnvironment(zone_, descriptor.GetRegisterParameterCount());
+    // For stubs, explicitly add the context to the environment.
+    start_environment_ = new (zone_)
+        HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
   } else {
     if (info->is_tracking_positions()) {
       info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
@@ -4675,12 +4674,11 @@
   }
 
   AddInstruction(arguments_object);
-  graph()->SetArgumentsObject(arguments_object);
 
   // Handle the arguments and arguments shadow variables specially (they do
   // not have declarations).
   if (scope->arguments() != NULL) {
-    environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
+    environment()->Bind(scope->arguments(), arguments_object);
   }
 
   int rest_index;
@@ -4736,8 +4734,11 @@
         }
         AddInstruction(function);
         // Allocate a block context and store it to the stack frame.
-        HInstruction* inner_context = Add<HAllocateBlockContext>(
-            outer_context, function, scope->GetScopeInfo(isolate()));
+        HValue* scope_info = Add<HConstant>(scope->GetScopeInfo(isolate()));
+        Add<HPushArguments>(scope_info, function);
+        HInstruction* inner_context = Add<HCallRuntime>(
+            Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
+        inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
         HInstruction* instr = Add<HStoreFrameContext>(inner_context);
         set_scope(scope);
         environment()->BindContext(inner_context);
@@ -4955,9 +4956,8 @@
     // will always evaluate to true, in a value context the return value needs
     // to be a JSObject.
     if (context->IsTest()) {
-      TestContext* test = TestContext::cast(context);
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      Goto(test->if_true(), state);
+      context->ReturnValue(graph()->GetConstantTrue());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
       Goto(function_return(), state);
@@ -5283,10 +5283,6 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
 
-  if (!FLAG_optimize_for_in) {
-    return Bailout(kForInStatementOptimizationIsDisabled);
-  }
-
   if (!stmt->each()->IsVariableProxy() ||
       !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
     return Bailout(kForInStatementWithNonLocalEachVariable);
@@ -5312,57 +5308,73 @@
 void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
                                             Variable* each_var,
                                             HValue* enumerable) {
-  HInstruction* map;
-  HInstruction* array;
-  HInstruction* enum_length;
+  Handle<Map> meta_map = isolate()->factory()->meta_map();
   bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN;
+  BuildCheckHeapObject(enumerable);
+  Add<HCheckInstanceType>(enumerable, HCheckInstanceType::IS_JS_RECEIVER);
+  Add<HSimulate>(stmt->ToObjectId());
   if (fast) {
-    map = Add<HForInPrepareMap>(enumerable);
-    Add<HSimulate>(stmt->PrepareId());
+    HForInPrepareMap* map = Add<HForInPrepareMap>(enumerable);
+    Push(map);
+    Add<HSimulate>(stmt->EnumId());
+    Drop(1);
+    Add<HCheckMaps>(map, meta_map);
 
-    array = Add<HForInCacheArray>(enumerable, map,
-                                  DescriptorArray::kEnumCacheBridgeCacheIndex);
-    enum_length = Add<HMapEnumLength>(map);
+    HForInCacheArray* array = Add<HForInCacheArray>(
+        enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
+    HValue* enum_length = BuildEnumLength(map);
 
-    HInstruction* index_cache = Add<HForInCacheArray>(
+    HForInCacheArray* index_cache = Add<HForInCacheArray>(
         enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
-    HForInCacheArray::cast(array)
-        ->set_index_cache(HForInCacheArray::cast(index_cache));
-  } else {
-    Add<HSimulate>(stmt->PrepareId());
-    {
-      NoObservableSideEffectsScope no_effects(this);
-      BuildJSObjectCheck(enumerable, 0);
-    }
-    Add<HSimulate>(stmt->ToObjectId());
+    array->set_index_cache(index_cache);
 
-    map = graph()->GetConstant1();
-    Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast;
+    Push(map);
+    Push(array);
+    Push(enum_length);
+    Add<HSimulate>(stmt->PrepareId());
+  } else {
+    Runtime::FunctionId function_id = Runtime::kForInEnumerate;
     Add<HPushArguments>(enumerable);
-    array = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+    HCallRuntime* array =
+        Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
     Push(array);
     Add<HSimulate>(stmt->EnumId());
     Drop(1);
-    Handle<Map> array_map = isolate()->factory()->fixed_array_map();
-    HValue* check = Add<HCheckMaps>(array, array_map);
-    enum_length = AddLoadFixedArrayLength(array, check);
+
+    IfBuilder if_fast(this);
+    if_fast.If<HCompareMap>(array, meta_map);
+    if_fast.Then();
+    {
+      HValue* cache_map = array;
+      HForInCacheArray* cache = Add<HForInCacheArray>(
+          enumerable, cache_map, DescriptorArray::kEnumCacheBridgeCacheIndex);
+      HValue* enum_length = BuildEnumLength(cache_map);
+      Push(cache_map);
+      Push(cache);
+      Push(enum_length);
+      Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
+    }
+    if_fast.Else();
+    {
+      Push(graph()->GetConstant1());
+      Push(array);
+      Push(AddLoadFixedArrayLength(array));
+      Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
+    }
   }
 
-  HInstruction* start_index = Add<HConstant>(0);
-
-  Push(map);
-  Push(array);
-  Push(enum_length);
-  Push(start_index);
+  Push(graph()->GetConstant0());
 
   HBasicBlock* loop_entry = BuildLoopEntry(stmt);
 
   // Reload the values to ensure we have up-to-date values inside the loop.
   // This is especially relevant for OSR, where the values don't come from the
   // computation above, but from the OSR entry block.
-  enumerable = environment()->ExpressionStackAt(4);
   HValue* index = environment()->ExpressionStackAt(0);
   HValue* limit = environment()->ExpressionStackAt(1);
+  HValue* array = environment()->ExpressionStackAt(2);
+  HValue* type = environment()->ExpressionStackAt(3);
+  enumerable = environment()->ExpressionStackAt(4);
 
   // Check that we still have more keys.
   HCompareNumericAndBranch* compare_index =
@@ -5382,32 +5394,67 @@
 
   set_current_block(loop_body);
 
-  HValue* key =
-      Add<HLoadKeyed>(environment()->ExpressionStackAt(2),  // Enum cache.
-                      index, index, nullptr, FAST_ELEMENTS);
+  // Compute the next enumerated value.
+  HValue* key = Add<HLoadKeyed>(array, index, index, nullptr, FAST_ELEMENTS);
 
+  HBasicBlock* continue_block = nullptr;
   if (fast) {
-    // Check if the expected map still matches that of the enumerable.
-    // If not just deoptimize.
-    Add<HCheckMapValue>(enumerable, environment()->ExpressionStackAt(3));
-    Bind(each_var, key);
-  } else {
-    Add<HPushArguments>(enumerable, key);
-    Runtime::FunctionId function_id = Runtime::kForInFilter;
-    key = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
-    Push(key);
+    // Check if the expected map still matches that of the enumerable.
+    Add<HCheckMapValue>(enumerable, type);
     Add<HSimulate>(stmt->FilterId());
+  } else {
+    // We need the continue block here to be able to skip over invalidated keys.
+    continue_block = graph()->CreateBasicBlock();
+
+    // We cannot use the IfBuilder here, since we need to be able to jump
+    // over the loop body in case of undefined result from %ForInFilter,
+    // and the poor soul that is the IfBuilder gets really confused about
+    // such "advanced control flow requirements".
+    HBasicBlock* if_fast = graph()->CreateBasicBlock();
+    HBasicBlock* if_slow = graph()->CreateBasicBlock();
+    HBasicBlock* if_slow_pass = graph()->CreateBasicBlock();
+    HBasicBlock* if_slow_skip = graph()->CreateBasicBlock();
+    HBasicBlock* if_join = graph()->CreateBasicBlock();
+
+    // Check if the expected map still matches that of the enumerable.
+    HValue* enumerable_map =
+        Add<HLoadNamedField>(enumerable, nullptr, HObjectAccess::ForMap());
+    FinishCurrentBlock(
+        New<HCompareObjectEqAndBranch>(enumerable_map, type, if_fast, if_slow));
+    set_current_block(if_fast);
+    {
+      // The enum cache for enumerable is still valid, no need to check key.
+      Push(key);
+      Goto(if_join);
+    }
+    set_current_block(if_slow);
+    {
+      // Check if key is still valid for enumerable.
+      Add<HPushArguments>(enumerable, key);
+      Runtime::FunctionId function_id = Runtime::kForInFilter;
+      Push(Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2));
+      Add<HSimulate>(stmt->FilterId());
+      FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
+          Top(), graph()->GetConstantUndefined(), if_slow_skip, if_slow_pass));
+    }
+    set_current_block(if_slow_pass);
+    { Goto(if_join); }
+    set_current_block(if_slow_skip);
+    {
+      // The key is no longer valid for enumerable, skip it.
+      Drop(1);
+      Goto(continue_block);
+    }
+    if_join->SetJoinId(stmt->FilterId());
+    set_current_block(if_join);
     key = Pop();
-    Bind(each_var, key);
-    IfBuilder if_undefined(this);
-    if_undefined.If<HCompareObjectEqAndBranch>(key,
-                                               graph()->GetConstantUndefined());
-    if_undefined.ThenDeopt(Deoptimizer::kUndefined);
-    if_undefined.End();
-    Add<HSimulate>(stmt->AssignmentId());
   }
 
+  Bind(each_var, key);
+  Add<HSimulate>(stmt->AssignmentId());
+
   BreakAndContinueInfo break_info(stmt, scope(), 5);
+  break_info.set_continue_block(continue_block);
   {
     BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
@@ -5420,7 +5467,10 @@
     set_current_block(body_exit);
 
     HValue* current_index = Pop();
-    Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
+    HValue* increment =
+        AddUncasted<HAdd>(current_index, graph()->GetConstant1());
+    increment->ClearFlag(HValue::kCanOverflow);
+    Push(increment);
     body_exit = current_block();
   }
 
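The rewritten slow path above hand-builds control flow that the IfBuilder cannot express: a key that fails the map check is re-validated through %ForInFilter, and an undefined result must jump past the loop body to the continue block. The same shape as a plain loop, as a toy model rather than graph construction:

    #include <cstdio>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    using Key = std::string;

    // Stand-in for %ForInFilter: returns nullopt when the key is no longer
    // a valid enumerable property of the receiver.
    std::optional<Key> ForInFilter(const Key& key, bool still_present) {
      if (!still_present) return std::nullopt;
      return key;
    }

    int main() {
      std::vector<std::pair<Key, bool>> keys = {
          {"a", true}, {"b", false}, {"c", true}};
      bool map_unchanged = false;  // fast path disabled: exercise the filter
      for (auto& [key, present] : keys) {
        Key each = key;
        if (!map_unchanged) {
          auto filtered = ForInFilter(key, present);
          if (!filtered) continue;  // the "if_slow_skip -> continue_block" edge
          each = *filtered;
        }
        std::printf("body sees %s\n", each.c_str());  // prints a and c
      }
    }
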
@@ -5638,7 +5688,7 @@
           Handle<Context> script_context = ScriptContextTable::GetContext(
               script_contexts, lookup.context_index);
           Handle<Object> current_value =
-              FixedArray::get(script_context, lookup.slot_index);
+              FixedArray::get(*script_context, lookup.slot_index, isolate());
 
           // If the value is not the hole, it will stay initialized,
           // so no need to generate a check.
@@ -6069,9 +6119,7 @@
 
   for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
-    if (subexpr->IsSpread()) {
-      return Bailout(kSpread);
-    }
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -6221,6 +6269,13 @@
   return instr;
 }
 
+Handle<FieldType>
+HOptimizedGraphBuilder::PropertyAccessInfo::GetFieldTypeFromMap(
+    Handle<Map> map) const {
+  DCHECK(IsFound());
+  DCHECK(number_ < map->NumberOfOwnDescriptors());
+  return handle(map->instance_descriptors()->GetFieldType(number_), isolate());
+}
 
 bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
     PropertyAccessInfo* info) {
@@ -6316,15 +6371,15 @@
     Object* raw_accessor =
         IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
                  : Handle<AccessorPair>::cast(accessors)->setter();
-    if (!raw_accessor->IsJSFunction()) return false;
-    Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
-    if (accessor->shared()->IsApiFunction()) {
-      CallOptimization call_optimization(accessor);
-      if (call_optimization.is_simple_api_call()) {
-        CallOptimization::HolderLookup holder_lookup;
-        api_holder_ =
-            call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
-      }
+    if (!raw_accessor->IsJSFunction() &&
+        !raw_accessor->IsFunctionTemplateInfo())
+      return false;
+    Handle<Object> accessor = handle(HeapObject::cast(raw_accessor));
+    CallOptimization call_optimization(accessor);
+    if (call_optimization.is_simple_api_call()) {
+      CallOptimization::HolderLookup holder_lookup;
+      api_holder_ =
+          call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
     }
     accessor_ = accessor;
   } else if (IsDataConstant()) {
@@ -6342,35 +6397,24 @@
   field_type_ = HType::Tagged();
 
   // Figure out the field type from the accessor map.
-  Handle<HeapType> field_type = GetFieldTypeFromMap(map);
+  Handle<FieldType> field_type = GetFieldTypeFromMap(map);
 
   // Collect the (stable) maps from the field type.
-  int num_field_maps = field_type->NumClasses();
-  if (num_field_maps > 0) {
+  if (field_type->IsClass()) {
     DCHECK(access_.representation().IsHeapObject());
-    field_maps_.Reserve(num_field_maps, zone());
-    HeapType::Iterator<Map> it = field_type->Classes();
-    while (!it.Done()) {
-      Handle<Map> field_map = it.Current();
-      if (!field_map->is_stable()) {
-        field_maps_.Clear();
-        break;
-      }
+    Handle<Map> field_map = field_type->AsClass();
+    if (field_map->is_stable()) {
       field_maps_.Add(field_map, zone());
-      it.Advance();
     }
   }
 
   if (field_maps_.is_empty()) {
     // Store is not safe if the field map was cleared.
-    return IsLoad() || !field_type->Is(HeapType::None());
+    return IsLoad() || !field_type->IsNone();
   }
 
-  field_maps_.Sort();
-  DCHECK_EQ(num_field_maps, field_maps_.length());
-
-  // Determine field HType from field HeapType.
-  field_type_ = HType::FromType<HeapType>(field_type);
+  // Determine field HType from field type.
+  field_type_ = HType::FromFieldType(field_type, zone());
   DCHECK(field_type_.IsHeapObject());
 
   // Add dependency on the map that introduced the field.
@@ -6381,6 +6425,10 @@
 
 bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
   Handle<Map> map = this->map();
+  if (name_->IsPrivate()) {
+    NotFound();
+    return !map->has_hidden_prototype();
+  }
 
   while (map->prototype()->IsJSObject()) {
     holder_ = handle(JSObject::cast(map->prototype()));
@@ -6573,7 +6621,8 @@
       Push(value);
     }
 
-    if (info->NeedsWrappingFor(info->accessor())) {
+    if (info->accessor()->IsJSFunction() &&
+        info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
       HValue* function = Add<HConstant>(info->accessor());
       PushArgumentsFromEnvironment(argument_count);
       return New<HCallFunction>(function, argument_count,
@@ -6587,7 +6636,12 @@
     }
 
     PushArgumentsFromEnvironment(argument_count);
-    return BuildCallConstantFunction(info->accessor(), argument_count);
+    if (!info->accessor()->IsJSFunction()) {
+      Bailout(kInliningBailedOut);
+      return nullptr;
+    }
+    return BuildCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
+                                     argument_count);
   }
 
   DCHECK(info->IsDataConstant());
@@ -6744,18 +6798,48 @@
   }
 }
 
-
-static bool ComputeReceiverTypes(Expression* expr,
-                                 HValue* receiver,
+static bool ComputeReceiverTypes(Expression* expr, HValue* receiver,
                                  SmallMapList** t,
-                                 Zone* zone) {
+                                 HOptimizedGraphBuilder* builder) {
+  Zone* zone = builder->zone();
   SmallMapList* maps = expr->GetReceiverTypes();
   *t = maps;
   bool monomorphic = expr->IsMonomorphic();
   if (maps != NULL && receiver->HasMonomorphicJSObjectType()) {
-    Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
-    maps->FilterForPossibleTransitions(root_map);
-    monomorphic = maps->length() == 1;
+    if (maps->length() > 0) {
+      Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+      maps->FilterForPossibleTransitions(root_map);
+      monomorphic = maps->length() == 1;
+    } else {
+      // No type feedback, see if we can infer the type. This is safe if
+      // the receiver had a known map at some point and no map-changing
+      // stores have happened to it since.
+      Handle<Map> candidate_map = receiver->GetMonomorphicJSObjectMap();
+      if (candidate_map->is_observed()) return false;
+      for (HInstruction* current = builder->current_block()->last();
+           current != nullptr; current = current->previous()) {
+        if (current->IsBlockEntry()) break;
+        if (current->CheckChangesFlag(kMaps)) {
+          // Only allow map changes that store the candidate map. We don't
+          // need to care which object the map is being written into.
+          if (!current->IsStoreNamedField()) break;
+          HStoreNamedField* map_change = HStoreNamedField::cast(current);
+          if (!map_change->value()->IsConstant()) break;
+          HConstant* map_constant = HConstant::cast(map_change->value());
+          if (!map_constant->representation().IsTagged()) break;
+          Handle<Object> map = map_constant->handle(builder->isolate());
+          if (!map.is_identical_to(candidate_map)) break;
+        }
+        if (current == receiver) {
+          // We made it all the way back to the receiver without encountering
+          // a map change! So we can assume that the receiver still has the
+          // candidate_map we know about.
+          maps->Add(candidate_map, zone);
+          monomorphic = true;
+          break;
+        }
+      }
+    }
   }
   return monomorphic && CanInlinePropertyAccess(maps->first());
 }
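
ComputeReceiverTypes gains a fallback for the no-feedback case: walk backwards from the current instruction toward the receiver, and if every intervening map-changing instruction is a store of the already-known candidate map, assume the receiver still holds that map. A toy model of the walk (simplified instruction records, not HInstructions; the is_observed bailout is omitted):

    #include <cstdio>
    #include <vector>

    struct Instr {
      bool is_receiver = false;
      bool changes_maps = false;
      int stored_map = -1;  // map constant stored, if this is a map store
    };

    // Returns true if candidate_map can still be assumed at the end of the
    // block: the scan reaches the receiver without an unrelated map change.
    bool InferReceiverMap(const std::vector<Instr>& block, int candidate_map) {
      for (auto it = block.rbegin(); it != block.rend(); ++it) {
        if (it->changes_maps && it->stored_map != candidate_map) return false;
        if (it->is_receiver) return true;  // receiver reached, map intact
      }
      return false;  // receiver not defined in this block
    }

    int main() {
      std::vector<Instr> block = {
          {true, false, -1},   // receiver defined here with map 7
          {false, true, 7},    // a store that (re)writes the same map: harmless
          {false, false, -1},  // unrelated instruction
      };
      std::printf("map 7 still valid: %d\n", InferReceiverMap(block, 7));  // 1
    }
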
@@ -6846,7 +6930,7 @@
           ScriptContextTable::GetContext(script_contexts, lookup.context_index);
 
       Handle<Object> current_value =
-          FixedArray::get(script_context, lookup.slot_index);
+          FixedArray::get(*script_context, lookup.slot_index, isolate());
 
       // If the value is not the hole, it will stay initialized,
       // so no need to generate a check.
@@ -7246,14 +7330,14 @@
       // use a generic Keyed Load if we are using the type vector, because
       // it has to share information with full code.
       HConstant* key = Add<HConstant>(name);
-      HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
-          object, key, function_language_mode(), PREMONOMORPHIC);
+      HLoadKeyedGeneric* result =
+          New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
       result->SetVectorAndSlot(vector, slot);
       return result;
     }
 
-    HLoadNamedGeneric* result = New<HLoadNamedGeneric>(
-        object, name, function_language_mode(), PREMONOMORPHIC);
+    HLoadNamedGeneric* result =
+        New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
     result->SetVectorAndSlot(vector, slot);
     return result;
   } else {
@@ -7287,8 +7371,8 @@
     HValue* object, HValue* key, HValue* value) {
   if (access_type == LOAD) {
     InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
-    HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
-        object, key, function_language_mode(), initial_state);
+    HLoadKeyedGeneric* result =
+        New<HLoadKeyedGeneric>(object, key, initial_state);
     // HLoadKeyedGeneric with vector ics benefits from being encoded as
     // MEGAMORPHIC because the vector/slot combo becomes unnecessary.
     if (initial_state != MEGAMORPHIC) {
@@ -7370,8 +7454,8 @@
 
 
 static bool CanInlineElementAccess(Handle<Map> map) {
-  return map->IsJSObjectMap() && !map->has_dictionary_elements() &&
-         !map->has_sloppy_arguments_elements() &&
+  return map->IsJSObjectMap() &&
+         (map->has_fast_elements() || map->has_fixed_typed_array_elements()) &&
          !map->has_indexed_interceptor() && !map->is_access_check_needed();
 }
 
@@ -7664,7 +7748,7 @@
   HInstruction* instr = NULL;
 
   SmallMapList* maps;
-  bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, zone());
+  bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, this);
 
   bool force_generic = false;
   if (expr->GetKeyType() == PROPERTY) {
@@ -7790,7 +7874,7 @@
       result = New<HConstant>(argument_count);
     }
   } else {
-    Push(graph()->GetArgumentsObject());
+    CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
     CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
     HValue* key = Pop();
     Drop(1);  // Arguments object.
@@ -7821,7 +7905,7 @@
     Expression* expr, FeedbackVectorSlot slot, HValue* object,
     Handle<Name> name, HValue* value, bool is_uninitialized) {
   SmallMapList* maps;
-  ComputeReceiverTypes(expr, object, &maps, zone());
+  ComputeReceiverTypes(expr, object, &maps, this);
   DCHECK(maps != NULL);
 
   if (maps->length() > 0) {
@@ -8341,10 +8425,12 @@
   CompilationInfo target_info(&parse_info);
   Handle<SharedFunctionInfo> target_shared(target->shared());
 
-  if (IsClassConstructor(target_shared->kind())) {
+  if (inlining_kind != CONSTRUCT_CALL_RETURN &&
+      IsClassConstructor(target_shared->kind())) {
     TraceInline(target, caller, "target is classConstructor");
     return false;
   }
+
   if (target_shared->HasDebugInfo()) {
     TraceInline(target, caller, "target is being debugged");
     return false;
@@ -8531,7 +8617,7 @@
       // return value will always evaluate to true, in a value context the
       // return value is the newly allocated receiver.
       if (call_context()->IsTest()) {
-        Goto(inlined_test_context()->if_true(), state);
+        inlined_test_context()->ReturnValue(graph()->GetConstantTrue());
       } else if (call_context()->IsEffect()) {
         Goto(function_return(), state);
       } else {
@@ -8554,7 +8640,7 @@
       // Falling off the end of a normal inlined function. This basically means
       // returning undefined.
       if (call_context()->IsTest()) {
-        Goto(inlined_test_context()->if_false(), state);
+        inlined_test_context()->ReturnValue(graph()->GetConstantFalse());
       } else if (call_context()->IsEffect()) {
         Goto(function_return(), state);
       } else {
@@ -8617,24 +8703,25 @@
                    CONSTRUCT_CALL_RETURN);
 }
 
-
-bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
                                              Handle<Map> receiver_map,
                                              BailoutId ast_id,
                                              BailoutId return_id) {
   if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
-  return TryInline(getter, 0, NULL, ast_id, return_id, GETTER_CALL_RETURN);
+  return getter->IsJSFunction() &&
+         TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
+                   GETTER_CALL_RETURN);
 }
 
-
-bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
                                              Handle<Map> receiver_map,
                                              BailoutId id,
                                              BailoutId assignment_id,
                                              HValue* implicit_return_value) {
   if (TryInlineApiSetter(setter, receiver_map, id)) return true;
-  return TryInline(setter, 1, implicit_return_value, id, assignment_id,
-                   SETTER_CALL_RETURN);
+  return setter->IsJSFunction() &&
+         TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
+                   id, assignment_id, SETTER_CALL_RETURN);
 }
 
 
@@ -8694,7 +8781,8 @@
   Isolate* isolate = jsarray_map->GetIsolate();
   Handle<Name> length_string = isolate->factory()->length_string();
   DescriptorArray* descriptors = jsarray_map->instance_descriptors();
-  int number = descriptors->SearchWithCache(*length_string, *jsarray_map);
+  int number =
+      descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
   DCHECK_NE(DescriptorArray::kNotFound, number);
   return descriptors->GetDetails(number).IsReadOnly();
 }
@@ -9058,6 +9146,7 @@
     case kArrayLastIndexOf: {
       if (receiver_map.is_null()) return false;
       if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+      if (!receiver_map->prototype()->IsJSObject()) return false;
       ElementsKind kind = receiver_map->elements_kind();
       if (!IsFastElementsKind(kind)) return false;
       if (receiver_map->is_observed()) return false;
@@ -9127,8 +9216,7 @@
                           kCallApiMethod);
 }
 
-
-bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
+bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
                                                 Handle<Map> receiver_map,
                                                 BailoutId ast_id) {
   SmallMapList receiver_maps(1, zone());
@@ -9141,8 +9229,7 @@
                           kCallApiGetter);
 }
 
-
-bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
+bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
                                                 Handle<Map> receiver_map,
                                                 BailoutId ast_id) {
   SmallMapList receiver_maps(1, zone());
@@ -9155,15 +9242,14 @@
                           kCallApiSetter);
 }
 
-
-bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
-                                               HValue* receiver,
-                                               SmallMapList* receiver_maps,
-                                               int argc,
-                                               BailoutId ast_id,
-                                               ApiCallType call_type) {
-  if (function->context()->native_context() !=
-      top_info()->closure()->context()->native_context()) {
+bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
+                                              HValue* receiver,
+                                              SmallMapList* receiver_maps,
+                                              int argc, BailoutId ast_id,
+                                              ApiCallType call_type) {
+  if (function->IsJSFunction() &&
+      Handle<JSFunction>::cast(function)->context()->native_context() !=
+          top_info()->closure()->context()->native_context()) {
     return false;
   }
   CallOptimization optimization(function);
@@ -9178,8 +9264,11 @@
     // Cannot embed a direct reference to the global proxy map
     // as it may be dropped on deserialization.
     CHECK(!isolate()->serializer_enabled());
+    DCHECK(function->IsJSFunction());
     DCHECK_EQ(0, receiver_maps->length());
-    receiver_maps->Add(handle(function->global_proxy()->map()), zone());
+    receiver_maps->Add(
+        handle(Handle<JSFunction>::cast(function)->global_proxy()->map()),
+        zone());
   }
   CallOptimization::HolderLookup holder_lookup =
       CallOptimization::kHolderNotFound;
@@ -9259,7 +9348,8 @@
 
   HInstruction* call = nullptr;
   if (!is_function) {
-    CallApiAccessorStub stub(isolate(), is_store, call_data_undefined);
+    CallApiAccessorStub stub(isolate(), is_store, call_data_undefined,
+                             !optimization.is_constant_call());
     Handle<Code> code = stub.GetCode();
     HConstant* code_value = Add<HConstant>(code);
     ApiAccessorDescriptor descriptor(isolate());
@@ -9643,6 +9733,9 @@
 
 
 void HOptimizedGraphBuilder::VisitCall(Call* expr) {
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    return Bailout(kTailCall);
+  }
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
@@ -9657,7 +9750,7 @@
     HValue* receiver = Top();
 
     SmallMapList* maps;
-    ComputeReceiverTypes(expr, receiver, &maps, zone());
+    ComputeReceiverTypes(expr, receiver, &maps, this);
 
     if (prop->key()->IsPropertyName() && maps->length() > 0) {
       Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
@@ -9852,7 +9945,7 @@
 // Checks whether allocation using the given constructor can be inlined.
 static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
   return constructor->has_initial_map() &&
-         !IsClassConstructor(constructor->shared()->kind()) &&
+         !IsSubclassConstructor(constructor->shared()->kind()) &&
          constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
          constructor->initial_map()->instance_size() <
              HAllocate::kMaxInlineSize;
@@ -10105,31 +10198,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateDataViewInitialize(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* arguments = expr->arguments();
-
-  DCHECK(arguments->length()== 4);
-  CHECK_ALIVE(VisitForValue(arguments->at(0)));
-  HValue* obj = Pop();
-
-  CHECK_ALIVE(VisitForValue(arguments->at(1)));
-  HValue* buffer = Pop();
-
-  CHECK_ALIVE(VisitForValue(arguments->at(2)));
-  HValue* byte_offset = Pop();
-
-  CHECK_ALIVE(VisitForValue(arguments->at(3)));
-  HValue* byte_length = Pop();
-
-  {
-    NoObservableSideEffectsScope scope(this);
-    BuildArrayBufferViewInitialization<JSDataView>(
-        obj, buffer, byte_offset, byte_length);
-  }
-}
-
-
 HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
     ExternalArrayType array_type,
     bool is_zero_byte_offset,
@@ -10227,7 +10295,7 @@
 
     HValue* backing_store = AddUncasted<HAdd>(
         Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()),
-        elements, Strength::WEAK, AddOfExternalAndTagged);
+        elements, AddOfExternalAndTagged);
 
     HValue* key = builder.BeginBody(
         Add<HConstant>(static_cast<int32_t>(0)),
@@ -10612,8 +10680,7 @@
   HConstant* delta = (expr->op() == Token::INC)
       ? graph()->GetConstant1()
       : graph()->GetConstantMinus1();
-  HInstruction* instr =
-      AddUncasted<HAdd>(Top(), delta, strength(function_language_mode()));
+  HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
   if (instr->IsAdd()) {
     HAdd* add = HAdd::cast(instr);
     add->set_observed_input_representation(1, rep);
@@ -10855,7 +10922,7 @@
     Maybe<HConstant*> number =
         constant->CopyToTruncatedNumber(isolate(), zone());
     if (number.IsJust()) {
-      *expected = Type::Number(zone());
+      *expected = Type::Number();
       return AddInstruction(number.FromJust());
     }
   }
@@ -10869,20 +10936,20 @@
 
   // Separate the number type from the rest.
   Type* expected_obj =
-      Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
+      Type::Intersect(expected_type, Type::NonNumber(), zone());
   Type* expected_number =
-      Type::Intersect(expected_type, Type::Number(zone()), zone());
+      Type::Intersect(expected_type, Type::Number(), zone());
 
   // We expect to get a number.
   // (We need to check first, since Type::None->Is(Type::Any()) == true.)
   if (expected_obj->Is(Type::None())) {
-    DCHECK(!expected_number->Is(Type::None(zone())));
+    DCHECK(!expected_number->Is(Type::None()));
     return value;
   }
 
-  if (expected_obj->Is(Type::Undefined(zone()))) {
+  if (expected_obj->Is(Type::Undefined())) {
     // This is already done by HChange.
-    *expected = Type::Union(expected_number, Type::Number(zone()), zone());
+    *expected = Type::Union(expected_number, Type::Number(), zone());
     return value;
   }
 
@@ -10907,8 +10974,7 @@
   }
   HValue* result = HGraphBuilder::BuildBinaryOperation(
       expr->op(), left, right, left_type, right_type, result_type,
-      fixed_right_arg, allocation_mode, strength(function_language_mode()),
-      expr->id());
+      fixed_right_arg, allocation_mode, expr->id());
   // Add a simulate after instructions with observable side effects, and
   // after phis, which are the result of BuildBinaryOperation when we
   // inlined some complex subgraph.
@@ -10924,11 +10990,12 @@
   return result;
 }
 
-
-HValue* HGraphBuilder::BuildBinaryOperation(
-    Token::Value op, HValue* left, HValue* right, Type* left_type,
-    Type* right_type, Type* result_type, Maybe<int> fixed_right_arg,
-    HAllocationMode allocation_mode, Strength strength, BailoutId opt_id) {
+HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
+                                            HValue* right, Type* left_type,
+                                            Type* right_type, Type* result_type,
+                                            Maybe<int> fixed_right_arg,
+                                            HAllocationMode allocation_mode,
+                                            BailoutId opt_id) {
   bool maybe_string_add = false;
   if (op == Token::ADD) {
     // If we are adding constant string with something for which we don't have
@@ -10957,7 +11024,7 @@
     Add<HDeoptimize>(
         Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
         Deoptimizer::SOFT);
-    left_type = Type::Any(zone());
+    left_type = Type::Any();
     left_rep = RepresentationFor(left_type);
     maybe_string_add = op == Token::ADD;
   }
@@ -10966,12 +11033,12 @@
     Add<HDeoptimize>(
         Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
         Deoptimizer::SOFT);
-    right_type = Type::Any(zone());
+    right_type = Type::Any();
     right_rep = RepresentationFor(right_type);
     maybe_string_add = op == Token::ADD;
   }
 
-  if (!maybe_string_add && !is_strong(strength)) {
+  if (!maybe_string_add) {
     left = TruncateToNumber(left, &left_type);
     right = TruncateToNumber(right, &right_type);
   }
@@ -10979,43 +11046,36 @@
   // Special case for string addition here.
   if (op == Token::ADD &&
       (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
-    if (is_strong(strength)) {
-      // In strong mode, if the one side of an addition is a string,
-      // the other side must be a string too.
+    // Validate type feedback for left argument.
+    if (left_type->Is(Type::String())) {
       left = BuildCheckString(left);
+    }
+
+    // Validate type feedback for right argument.
+    if (right_type->Is(Type::String())) {
       right = BuildCheckString(right);
-    } else {
-      // Validate type feedback for left argument.
-      if (left_type->Is(Type::String())) {
-        left = BuildCheckString(left);
-      }
+    }
 
-      // Validate type feedback for right argument.
-      if (right_type->Is(Type::String())) {
-        right = BuildCheckString(right);
-      }
+    // Convert left argument as necessary.
+    if (left_type->Is(Type::Number())) {
+      DCHECK(right_type->Is(Type::String()));
+      left = BuildNumberToString(left, left_type);
+    } else if (!left_type->Is(Type::String())) {
+      DCHECK(right_type->Is(Type::String()));
+      return AddUncasted<HStringAdd>(
+          left, right, allocation_mode.GetPretenureMode(),
+          STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
+    }
 
-      // Convert left argument as necessary.
-      if (left_type->Is(Type::Number())) {
-        DCHECK(right_type->Is(Type::String()));
-        left = BuildNumberToString(left, left_type);
-      } else if (!left_type->Is(Type::String())) {
-        DCHECK(right_type->Is(Type::String()));
-        return AddUncasted<HStringAdd>(
-            left, right, allocation_mode.GetPretenureMode(),
-            STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
-      }
-
-      // Convert right argument as necessary.
-      if (right_type->Is(Type::Number())) {
-        DCHECK(left_type->Is(Type::String()));
-        right = BuildNumberToString(right, right_type);
-      } else if (!right_type->Is(Type::String())) {
-        DCHECK(left_type->Is(Type::String()));
-        return AddUncasted<HStringAdd>(
-            left, right, allocation_mode.GetPretenureMode(),
-            STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
-      }
+    // Convert right argument as necessary.
+    if (right_type->Is(Type::Number())) {
+      DCHECK(left_type->Is(Type::String()));
+      right = BuildNumberToString(right, right_type);
+    } else if (!right_type->Is(Type::String())) {
+      DCHECK(left_type->Is(Type::String()));
+      return AddUncasted<HStringAdd>(
+          left, right, allocation_mode.GetPretenureMode(),
+          STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
     }
 
     // Fast paths for empty constant strings.
@@ -11064,6 +11124,16 @@
         allocation_mode.feedback_site());
   }
 
+  // Special case for +x here.
+  if (op == Token::MUL) {
+    if (left->EqualsInteger32Constant(1)) {
+      return BuildToNumber(right);
+    }
+    if (right->EqualsInteger32Constant(1)) {
+      return BuildToNumber(left);
+    }
+  }
+
   if (graph()->info()->IsStub()) {
     left = EnforceNumberType(left, left_type);
     right = EnforceNumberType(right, right_type);
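The new Token::MUL early-out above is a peephole for unary plus: assuming the usual desugaring of '+x' into a multiplication by one (which the "Special case for +x here" comment hints at), the multiply is redundant once ToNumber semantics apply, so the whole operation collapses to the bare conversion:

    // x * 1 and ToNumber(x) agree for every JS value, including edge cases:
    //   NaN * 1 == NaN,  (-0) * 1 == -0,  Infinity * 1 == Infinity
    if (left->EqualsInteger32Constant(1)) return BuildToNumber(right);
    if (right->EqualsInteger32Constant(1)) return BuildToNumber(left);
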
@@ -11084,78 +11154,51 @@
       default:
         UNREACHABLE();
       case Token::ADD:
-        function_id =
-            is_strong(strength) ? Runtime::kAdd_Strong : Runtime::kAdd;
+        function_id = Runtime::kAdd;
         break;
       case Token::SUB:
-        function_id = is_strong(strength) ? Runtime::kSubtract_Strong
-                                          : Runtime::kSubtract;
+        function_id = Runtime::kSubtract;
         break;
       case Token::MUL:
-        function_id = is_strong(strength) ? Runtime::kMultiply_Strong
-                                          : Runtime::kMultiply;
+        function_id = Runtime::kMultiply;
         break;
       case Token::DIV:
-        function_id =
-            is_strong(strength) ? Runtime::kDivide_Strong : Runtime::kDivide;
+        function_id = Runtime::kDivide;
         break;
       case Token::MOD:
-        function_id =
-            is_strong(strength) ? Runtime::kModulus_Strong : Runtime::kModulus;
+        function_id = Runtime::kModulus;
         break;
       case Token::BIT_OR:
-        function_id = is_strong(strength) ? Runtime::kBitwiseOr_Strong
-                                          : Runtime::kBitwiseOr;
+        function_id = Runtime::kBitwiseOr;
         break;
       case Token::BIT_AND:
-        function_id = is_strong(strength) ? Runtime::kBitwiseAnd_Strong
-                                          : Runtime::kBitwiseAnd;
+        function_id = Runtime::kBitwiseAnd;
         break;
       case Token::BIT_XOR:
-        function_id = is_strong(strength) ? Runtime::kBitwiseXor_Strong
-                                          : Runtime::kBitwiseXor;
+        function_id = Runtime::kBitwiseXor;
         break;
       case Token::SAR:
-        function_id = is_strong(strength) ? Runtime::kShiftRight_Strong
-                                          : Runtime::kShiftRight;
+        function_id = Runtime::kShiftRight;
         break;
       case Token::SHR:
-        function_id = is_strong(strength) ? Runtime::kShiftRightLogical_Strong
-                                          : Runtime::kShiftRightLogical;
+        function_id = Runtime::kShiftRightLogical;
         break;
       case Token::SHL:
-        function_id = is_strong(strength) ? Runtime::kShiftLeft_Strong
-                                          : Runtime::kShiftLeft;
+        function_id = Runtime::kShiftLeft;
         break;
     }
     Add<HPushArguments>(left, right);
     instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
   } else {
-    if (is_strong(strength) && Token::IsBitOp(op)) {
-      // TODO(conradw): This is not efficient, but is necessary to prevent
-      // conversion of oddball values to numbers in strong mode. It would be
-      // better to prevent the conversion rather than adding a runtime check.
-      IfBuilder if_builder(this);
-      if_builder.If<HHasInstanceTypeAndBranch>(left, ODDBALL_TYPE);
-      if_builder.OrIf<HHasInstanceTypeAndBranch>(right, ODDBALL_TYPE);
-      if_builder.Then();
-      Add<HCallRuntime>(
-          Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
-          0);
-      if (!graph()->info()->IsStub()) {
-        Add<HSimulate>(opt_id, REMOVABLE_SIMULATE);
-      }
-      if_builder.End();
-    }
     switch (op) {
       case Token::ADD:
-        instr = AddUncasted<HAdd>(left, right, strength);
+        instr = AddUncasted<HAdd>(left, right);
         break;
       case Token::SUB:
-        instr = AddUncasted<HSub>(left, right, strength);
+        instr = AddUncasted<HSub>(left, right);
         break;
       case Token::MUL:
-        instr = AddUncasted<HMul>(left, right, strength);
+        instr = AddUncasted<HMul>(left, right);
         break;
       case Token::MOD: {
         if (fixed_right_arg.IsJust() &&
@@ -11168,38 +11211,38 @@
           if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
           right = fixed_right;
         }
-        instr = AddUncasted<HMod>(left, right, strength);
+        instr = AddUncasted<HMod>(left, right);
         break;
       }
       case Token::DIV:
-        instr = AddUncasted<HDiv>(left, right, strength);
+        instr = AddUncasted<HDiv>(left, right);
         break;
       case Token::BIT_XOR:
       case Token::BIT_AND:
-        instr = AddUncasted<HBitwise>(op, left, right, strength);
+        instr = AddUncasted<HBitwise>(op, left, right);
         break;
       case Token::BIT_OR: {
         HValue *operand, *shift_amount;
         if (left_type->Is(Type::Signed32()) &&
             right_type->Is(Type::Signed32()) &&
             MatchRotateRight(left, right, &operand, &shift_amount)) {
-          instr = AddUncasted<HRor>(operand, shift_amount, strength);
+          instr = AddUncasted<HRor>(operand, shift_amount);
         } else {
-          instr = AddUncasted<HBitwise>(op, left, right, strength);
+          instr = AddUncasted<HBitwise>(op, left, right);
         }
         break;
       }
       case Token::SAR:
-        instr = AddUncasted<HSar>(left, right, strength);
+        instr = AddUncasted<HSar>(left, right);
         break;
       case Token::SHR:
-        instr = AddUncasted<HShr>(left, right, strength);
+        instr = AddUncasted<HShr>(left, right);
         if (instr->IsShr() && CanBeZero(right)) {
           graph()->RecordUint32Instruction(instr);
         }
         break;
       case Token::SHL:
-        instr = AddUncasted<HShl>(left, right, strength);
+        instr = AddUncasted<HShl>(left, right);
         break;
       default:
         UNREACHABLE();
@@ -11520,7 +11563,7 @@
     Add<HDeoptimize>(
         Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
         Deoptimizer::SOFT);
-    combined_type = left_type = right_type = Type::Any(zone());
+    combined_type = left_type = right_type = Type::Any();
   }
 
   Representation left_rep = RepresentationFor(left_type);
@@ -11657,8 +11700,7 @@
     return result;
   } else {
     if (combined_rep.IsTagged() || combined_rep.IsNone()) {
-      HCompareGeneric* result = Add<HCompareGeneric>(
-          left, right, op, strength(function_language_mode()));
+      HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
       result->set_observed_input_representation(1, left_rep);
       result->set_observed_input_representation(2, right_rep);
       if (result->HasObservableSideEffects()) {
@@ -11674,8 +11716,8 @@
       HBranch* branch = New<HBranch>(result);
       return branch;
     } else {
-      HCompareNumericAndBranch* result = New<HCompareNumericAndBranch>(
-          left, right, op, strength(function_language_mode()));
+      HCompareNumericAndBranch* result =
+          New<HCompareNumericAndBranch>(left, right, op);
       result->set_observed_input_representation(left_rep, right_rep);
       if (top_info()->is_tracking_positions()) {
         result->SetOperandPositions(zone(), left_position, right_position);
@@ -11706,7 +11748,8 @@
   } else {
     DCHECK_EQ(Token::EQ, expr->op());
     Type* type = expr->combined_type()->Is(Type::None())
-        ? Type::Any(zone()) : expr->combined_type();
+                     ? Type::Any()
+                     : expr->combined_type();
     HIfContinuation continuation;
     BuildCompareNil(value, type, &continuation);
     return ast_context()->ReturnContinuation(&continuation, expr->id());
@@ -12149,8 +12192,8 @@
 }
 
 
-void HOptimizedGraphBuilder::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
+void HOptimizedGraphBuilder::VisitRewritableExpression(
+    RewritableExpression* node) {
   CHECK_ALIVE(Visit(node->expression()));
 }
 
@@ -12178,25 +12221,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HHasInstanceTypeAndBranch* result = New<HHasInstanceTypeAndBranch>(
-      value, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12264,6 +12288,30 @@
 }
 
 
+void HOptimizedGraphBuilder::GenerateToName(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* input = Pop();
+  if (input->type().IsSmi()) {
+    HValue* result = BuildNumberToString(input, Type::SignedSmall());
+    return ast_context()->ReturnValue(result);
+  } else if (input->type().IsTaggedNumber()) {
+    HValue* result = BuildNumberToString(input, Type::Number());
+    return ast_context()->ReturnValue(result);
+  } else if (input->type().IsString()) {
+    return ast_context()->ReturnValue(input);
+  } else {
+    Callable callable = CodeFactory::ToName(isolate());
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), input};
+    HInstruction* result =
+        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                                 Vector<HValue*>(values, arraysize(values)));
+    return ast_context()->ReturnInstruction(result, call->id());
+  }
+}
+
+
 void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
   DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
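GenerateToName above follows the same calling pattern that GenerateToString, GenerateSubString and GenerateRegExpExec adopt later in this file: the CodeFactory callable supplies both the code object and its register calling convention, and the explicit value list starts with the context. A sketch of the pattern with names from the hunk above; the integer argument is presumably the count of stack-passed arguments, zero here since everything travels in registers (compare GenerateSubString, which pushes its arguments first and passes their count):

    Callable callable = CodeFactory::ToName(isolate());
    HValue* stub = Add<HConstant>(callable.code());  // code object as constant
    HValue* values[] = {context(), input};           // register parameters
    HInstruction* result =
        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
                                 Vector<HValue*>(values, arraysize(values)));
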
@@ -12276,11 +12324,11 @@
 void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
   DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  Callable callable = CodeFactory::ToString(isolate());
   HValue* input = Pop();
   if (input->type().IsString()) {
     return ast_context()->ReturnValue(input);
   } else {
+    Callable callable = CodeFactory::ToString(isolate());
     HValue* stub = Add<HConstant>(callable.code());
     HValue* values[] = {context(), input};
     HInstruction* result =
@@ -12310,16 +12358,13 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   Callable callable = CodeFactory::ToNumber(isolate());
   HValue* input = Pop();
-  if (input->type().IsTaggedNumber()) {
-    return ast_context()->ReturnValue(input);
-  } else {
-    HValue* stub = Add<HConstant>(callable.code());
-    HValue* values[] = {context(), input};
-    HInstruction* result =
-        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
-                                 Vector<HValue*>(values, arraysize(values)));
-    return ast_context()->ReturnInstruction(result, call->id());
+  HValue* result = BuildToNumber(input);
+  if (result->HasObservableSideEffects()) {
+    if (!ast_context()->IsEffect()) Push(result);
+    Add<HSimulate>(call->id(), REMOVABLE_SIMULATE);
+    if (!ast_context()->IsEffect()) result = Pop();
   }
+  return ast_context()->ReturnValue(result);
 }
 
 
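The rewritten GenerateToNumber routes through the shared BuildToNumber helper; the Push/Simulate/Pop sequence keeps the freshly produced value alive across the deoptimization point. An annotated reading of the hunk above, not new behavior:

    if (result->HasObservableSideEffects()) {
      // HSimulate records a point at which execution may deoptimize; the
      // result must survive in the environment across it, so it is parked
      // on the environment stack unless only the effect is needed.
      if (!ast_context()->IsEffect()) Push(result);
      Add<HSimulate>(call->id(), REMOVABLE_SIMULATE);
      if (!ast_context()->IsEffect()) result = Pop();
    }
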
@@ -12372,48 +12417,6 @@
 }
 
 
-// Support for arguments.length and arguments[?].
-void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 0);
-  HInstruction* result = NULL;
-  if (function_state()->outer() == NULL) {
-    HInstruction* elements = Add<HArgumentsElements>(false);
-    result = New<HArgumentsLength>(elements);
-  } else {
-    // Number of arguments without receiver.
-    int argument_count = environment()->
-        arguments_environment()->parameter_count() - 1;
-    result = New<HConstant>(argument_count);
-  }
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* index = Pop();
-  HInstruction* result = NULL;
-  if (function_state()->outer() == NULL) {
-    HInstruction* elements = Add<HArgumentsElements>(false);
-    HInstruction* length = Add<HArgumentsLength>(elements);
-    HInstruction* checked_index = Add<HBoundsCheck>(index, length);
-    result = New<HAccessArgumentsAt>(elements, length, checked_index);
-  } else {
-    EnsureArgumentsArePushedForAccess();
-
-    // Number of arguments without receiver.
-    HInstruction* elements = function_state()->arguments_elements();
-    int argument_count = environment()->
-        arguments_environment()->parameter_count() - 1;
-    HInstruction* length = Add<HConstant>(argument_count);
-    HInstruction* checked_key = Add<HBoundsCheck>(index, length);
-    result = New<HAccessArgumentsAt>(elements, length, checked_key);
-  }
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12442,27 +12445,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateJSValueGetValue(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HInstruction* result = Add<HLoadNamedField>(
-      value, nullptr,
-      HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset));
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
-  DCHECK_EQ(1, call->arguments()->length());
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HHasInstanceTypeAndBranch* result =
-      New<HHasInstanceTypeAndBranch>(value, JS_DATE_TYPE);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
     CallRuntime* call) {
   DCHECK(call->arguments()->length() == 3);
@@ -12495,43 +12477,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 2);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  HValue* value = Pop();
-  HValue* object = Pop();
-
-  // Check if object is a JSValue.
-  IfBuilder if_objectisvalue(this);
-  if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
-  if_objectisvalue.Then();
-  {
-    // Create in-object property store to kValueOffset.
-    Add<HStoreNamedField>(object,
-        HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
-        value);
-    if (!ast_context()->IsEffect()) {
-      Push(value);
-    }
-    Add<HSimulate>(call->id(), FIXED_SIMULATE);
-  }
-  if_objectisvalue.Else();
-  {
-    // Nothing to do in this case.
-    if (!ast_context()->IsEffect()) {
-      Push(value);
-    }
-    Add<HSimulate>(call->id(), FIXED_SIMULATE);
-  }
-  if_objectisvalue.End();
-  if (!ast_context()->IsEffect()) {
-    Drop(1);
-  }
-  return ast_context()->ReturnValue(value);
-}
-
-
 // Fast support for charCodeAt(n).
 void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 2);
@@ -12568,25 +12513,18 @@
 }
 
 
-// Fast support for object equality testing.
-void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 2);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  HValue* right = Pop();
-  HValue* left = Pop();
-  HCompareObjectEqAndBranch* result =
-      New<HCompareObjectEqAndBranch>(left, right);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
 // Fast support for SubString.
 void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
   DCHECK_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   PushArgumentsFromEnvironment(call->arguments()->length());
-  HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
+  Callable callable = CodeFactory::SubString(isolate());
+  HValue* stub = Add<HConstant>(callable.code());
+  HValue* values[] = {context()};
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub, call->arguments()->length(), callable.descriptor(),
+      Vector<HValue*>(values, arraysize(values)));
+  result->set_type(HType::String());
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12596,7 +12534,12 @@
   DCHECK_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   PushArgumentsFromEnvironment(call->arguments()->length());
-  HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
+  Callable callable = CodeFactory::RegExpExec(isolate());
+  HValue* stub = Add<HConstant>(callable.code());
+  HValue* values[] = {context()};
+  HInstruction* result = New<HCallWithDescriptor>(
+      stub, call->arguments()->length(), callable.descriptor(),
+      Vector<HValue*>(values, arraysize(values)));
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -12669,7 +12612,7 @@
   DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* number = Pop();
-  HValue* result = BuildNumberToString(number, Type::Any(zone()));
+  HValue* result = BuildNumberToString(number, Type::Any());
   return ast_context()->ReturnValue(result);
 }
 
@@ -12929,16 +12872,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
-  // Simply returning undefined here would be semantically correct and even
-  // avoid the bailout. Nevertheless, some ancient benchmarks like SunSpider's
-  // string-fasta would tank, because fullcode contains an optimized version.
-  // Obviously the fullcode => Crankshaft => bailout => fullcode dance is
-  // faster... *sigh*
-  return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
-}
-
-
 void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
     CallRuntime* call) {
   Add<HDebugBreak>();
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index 40a1834..ce0d0df 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -297,11 +297,12 @@
 class InductionVariableBlocksTable;
 class HGraph final : public ZoneObject {
  public:
-  explicit HGraph(CompilationInfo* info);
+  explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
 
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return zone_; }
   CompilationInfo* info() const { return info_; }
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
 
   const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
   const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -345,13 +346,6 @@
   bool IsStandardConstant(HConstant* constant);
 
   HBasicBlock* CreateBasicBlock();
-  HArgumentsObject* GetArgumentsObject() const {
-    return arguments_object_.get();
-  }
-
-  void SetArgumentsObject(HArgumentsObject* object) {
-    arguments_object_.set(object);
-  }
 
   int GetMaximumValueID() const { return values_.length(); }
   int GetNextBlockID() { return next_block_id_++; }
@@ -481,11 +475,11 @@
   SetOncePointer<HConstant> constant_the_hole_;
   SetOncePointer<HConstant> constant_null_;
   SetOncePointer<HConstant> constant_invalid_context_;
-  SetOncePointer<HArgumentsObject> arguments_object_;
 
   HOsrBuilder* osr_;
 
   CompilationInfo* info_;
+  CallInterfaceDescriptor descriptor_;
   Zone* zone_;
 
   bool is_recursive_;
@@ -1006,8 +1000,10 @@
 
 class HGraphBuilder {
  public:
-  explicit HGraphBuilder(CompilationInfo* info)
+  explicit HGraphBuilder(CompilationInfo* info,
+                         CallInterfaceDescriptor descriptor)
       : info_(info),
+        descriptor_(descriptor),
         graph_(NULL),
         current_block_(NULL),
         scope_(info->scope()),
@@ -1294,6 +1290,8 @@
 
   HValue* BuildGetElementsKind(HValue* object);
 
+  HValue* BuildEnumLength(HValue* map);
+
   HValue* BuildCheckHeapObject(HValue* object);
   HValue* BuildCheckString(HValue* string);
   HValue* BuildWrapReceiver(HValue* object, HValue* function);
@@ -1323,6 +1321,7 @@
                                    bool is_jsarray);
 
   HValue* BuildNumberToString(HValue* object, Type* type);
+  HValue* BuildToNumber(HValue* input);
   HValue* BuildToObject(HValue* receiver);
 
   void BuildJSObjectCheck(HValue* receiver,
@@ -1349,8 +1348,7 @@
 
   HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
                                               HValue* elements, HValue* key,
-                                              HValue* hash,
-                                              LanguageMode language_mode);
+                                              HValue* hash);
 
   // ES6 section 7.4.7 CreateIterResultObject ( value, done )
   HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
@@ -1429,7 +1427,6 @@
                                Type* left_type, Type* right_type,
                                Type* result_type, Maybe<int> fixed_right_arg,
                                HAllocationMode allocation_mode,
-                               Strength strength,
                                BailoutId opt_id = BailoutId::None());
 
   HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
@@ -1912,6 +1909,7 @@
   }
 
   CompilationInfo* info_;
+  CallInterfaceDescriptor descriptor_;
   HGraph* graph_;
   HBasicBlock* current_block_;
   Scope* scope_;
@@ -2200,28 +2198,21 @@
   F(IsRegExp)                          \
   F(IsJSProxy)                         \
   F(Call)                              \
-  F(ArgumentsLength)                   \
-  F(Arguments)                         \
   F(ValueOf)                           \
-  F(SetValueOf)                        \
-  F(IsDate)                            \
   F(StringCharFromCode)                \
   F(StringCharAt)                      \
   F(OneByteSeqStringSetChar)           \
   F(TwoByteSeqStringSetChar)           \
-  F(ObjectEquals)                      \
   F(ToInteger)                         \
+  F(ToName)                            \
   F(ToObject)                          \
   F(ToString)                          \
   F(ToLength)                          \
   F(ToNumber)                          \
-  F(IsFunction)                        \
   F(IsJSReceiver)                      \
   F(MathPow)                           \
-  F(IsMinusZero)                       \
   F(HasCachedArrayIndex)               \
   F(GetCachedArrayIndex)               \
-  F(FastOneByteArrayJoin)              \
   F(DebugBreakInOptimizedCode)         \
   F(StringCharCodeAt)                  \
   F(SubString)                         \
@@ -2233,7 +2224,6 @@
   F(DebugIsActive)                     \
   /* Typed Arrays */                   \
   F(TypedArrayInitialize)              \
-  F(DataViewInitialize)                \
   F(MaxSmi)                            \
   F(TypedArrayMaxSizeInHeap)           \
   F(ArrayBufferViewGetByteLength)      \
@@ -2262,9 +2252,7 @@
   /* ES6 Iterators */                  \
   F(CreateIterResultObject)            \
   /* Arrays */                         \
-  F(HasFastPackedElements)             \
-  /* JSValue */                        \
-  F(JSValueGetValue)
+  F(HasFastPackedElements)
 
 #define GENERATOR_DECLARATION(Name) void Generate##Name(CallRuntime* call);
   FOR_EACH_HYDROGEN_INTRINSIC(GENERATOR_DECLARATION)
@@ -2420,14 +2408,10 @@
 
   bool TryInlineCall(Call* expr);
   bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
-  bool TryInlineGetter(Handle<JSFunction> getter,
-                       Handle<Map> receiver_map,
-                       BailoutId ast_id,
-                       BailoutId return_id);
-  bool TryInlineSetter(Handle<JSFunction> setter,
-                       Handle<Map> receiver_map,
-                       BailoutId id,
-                       BailoutId assignment_id,
+  bool TryInlineGetter(Handle<Object> getter, Handle<Map> receiver_map,
+                       BailoutId ast_id, BailoutId return_id);
+  bool TryInlineSetter(Handle<Object> setter, Handle<Map> receiver_map,
+                       BailoutId id, BailoutId assignment_id,
                        HValue* implicit_return_value);
   bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
                              int arguments_count);
@@ -2445,18 +2429,13 @@
                               HValue* receiver,
                               SmallMapList* receiver_types);
   bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
-  bool TryInlineApiGetter(Handle<JSFunction> function,
-                          Handle<Map> receiver_map,
+  bool TryInlineApiGetter(Handle<Object> function, Handle<Map> receiver_map,
                           BailoutId ast_id);
-  bool TryInlineApiSetter(Handle<JSFunction> function,
-                          Handle<Map> receiver_map,
+  bool TryInlineApiSetter(Handle<Object> function, Handle<Map> receiver_map,
                           BailoutId ast_id);
-  bool TryInlineApiCall(Handle<JSFunction> function,
-                         HValue* receiver,
-                         SmallMapList* receiver_maps,
-                         int argc,
-                         BailoutId ast_id,
-                         ApiCallType call_type);
+  bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
+                        SmallMapList* receiver_maps, int argc, BailoutId ast_id,
+                        ApiCallType call_type);
   static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
   static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
 
@@ -2534,7 +2513,7 @@
         : builder_(builder),
           access_type_(access_type),
           map_(map),
-          name_(name),
+          name_(isolate()->factory()->InternalizeName(name)),
           field_type_(HType::Tagged()),
           access_(HObjectAccess::ForMap()),
           lookup_type_(NOT_FOUND),
@@ -2599,7 +2578,7 @@
 
     Isolate* isolate() const { return builder_->isolate(); }
     Handle<JSObject> holder() { return holder_; }
-    Handle<JSFunction> accessor() { return accessor_; }
+    Handle<Object> accessor() { return accessor_; }
     Handle<Object> constant() { return constant_; }
     Handle<Map> transition() { return transition_; }
     SmallMapList* field_maps() { return &field_maps_; }
@@ -2636,12 +2615,7 @@
     Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
       return GetConstantFromMap(map);
     }
-    Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
-      DCHECK(IsFound());
-      DCHECK(number_ < map->NumberOfOwnDescriptors());
-      return handle(map->instance_descriptors()->GetFieldType(number_),
-                    isolate());
-    }
+    Handle<FieldType> GetFieldTypeFromMap(Handle<Map> map) const;
     Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
       DCHECK(IsFound());
       DCHECK(number_ < map->NumberOfOwnDescriptors());
@@ -2657,7 +2631,7 @@
 
     void LookupDescriptor(Map* map, Name* name) {
       DescriptorArray* descriptors = map->instance_descriptors();
-      int number = descriptors->SearchWithCache(name, map);
+      int number = descriptors->SearchWithCache(isolate(), name, map);
       if (number == DescriptorArray::kNotFound) return NotFound();
       lookup_type_ = DESCRIPTOR_TYPE;
       details_ = descriptors->GetDetails(number);
@@ -2705,7 +2679,7 @@
     Handle<Map> map_;
     Handle<Name> name_;
     Handle<JSObject> holder_;
-    Handle<JSFunction> accessor_;
+    Handle<Object> accessor_;
     Handle<JSObject> api_holder_;
     Handle<Object> constant_;
     SmallMapList field_maps_;
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 4ec33ab..a535153 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -74,7 +74,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
@@ -131,13 +131,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
-#endif
-
     if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
       // Move state of dynamic frame alignment into edx.
       __ Move(edx, Immediate(kNoAlignmentPadding));
@@ -490,7 +483,7 @@
       masm()->nop();
     }
   }
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -578,7 +571,7 @@
   if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()));
+    return Operand(ebp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -590,7 +583,7 @@
 Operand LCodeGen::HighOperand(LOperand* op) {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+    return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -659,9 +652,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -671,9 +661,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -983,26 +970,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1668,13 +1635,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 Operand LCodeGen::BuildSeqStringOperand(Register string,
                                         LOperand* index,
                                         String::Encoding encoding) {
@@ -1923,8 +1883,7 @@
   DCHECK(ToRegister(instr->right()).is(eax));
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2167,8 +2126,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2230,34 +2190,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    XMMRegister value = ToDoubleRegister(instr->value());
-    XMMRegister xmm_scratch = double_scratch0();
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(xmm_scratch, value);
-    EmitFalseBranch(instr, not_equal);
-    __ movmskpd(scratch, value);
-    __ test(scratch, Immediate(1));
-    EmitBranch(instr, not_zero);
-  } else {
-    Register value = ToRegister(instr->value());
-    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
-    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
-           Immediate(0x1));
-    EmitFalseBranch(instr, no_overflow);
-    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
-           Immediate(0x00000000));
-    EmitBranch(instr, equal);
-  }
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2516,8 +2448,7 @@
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2643,9 +2574,9 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2760,10 +2691,10 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2877,6 +2808,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -2996,8 +2930,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3657,21 +3591,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->function()).is(edi));
   DCHECK(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(edx));
     DCHECK(vector_register.is(ebx));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ mov(vector_register, vector);
     __ mov(slot_register, Immediate(Smi::FromInt(index)));
@@ -3947,6 +3882,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -5155,8 +5093,8 @@
     final_branch_condition = equal;
 
   } else if (String::Equals(type_name, factory()->undefined_string())) {
-    __ cmp(input, factory()->undefined_value());
-    __ j(equal, true_label, true_distance);
+    __ cmp(input, factory()->null_value());
+    __ j(equal, false_label, false_distance);
     __ JumpIfSmi(input, false_label, false_distance);
     // Check for undetectable objects => true.
     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
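The typeof x == "undefined" fast path changes shape here: instead of accepting the undefined oddball by direct comparison, the emitted code rejects null up front and then relies on the undetectable bit in the map (presumably the undefined oddball's map carries that bit, while typeof null must yield "object"). The cases the code has to separate:

    // typeof undefined     == "undefined"   undetectable map  -> true
    // typeof document.all  == "undefined"   undetectable map  -> true
    // typeof null          == "object"      explicit compare  -> false
    // typeof 42            == "number"      Smi check         -> false
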
@@ -5328,12 +5266,6 @@
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(&call_runtime);
@@ -5344,11 +5276,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(eax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5460,15 +5388,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h
index 06a3e10..589ef2e 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -47,10 +47,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -132,7 +130,13 @@
                        Register temporary,
                        Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
index a0cb939..e2772d5 100644
--- a/src/crankshaft/ia32/lithium-ia32.cc
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -344,11 +344,11 @@
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot if allocating a double-width slot.
   if (kind == DOUBLE_REGISTERS) {
-    spill_slot_count_++;
-    spill_slot_count_ |= 1;
+    current_frame_slots_++;
+    current_frame_slots_ |= 1;
     num_double_slots_++;
   }
-  return spill_slot_count_++;
+  return current_frame_slots_++;
 }
 
 
@@ -437,7 +437,7 @@
   // Reserve the first spill slot for the state of dynamic alignment.
   if (info()->IsOptimizing()) {
     int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    DCHECK_EQ(alignment_state_index, 0);
+    DCHECK_EQ(alignment_state_index, 4);
     USE(alignment_state_index);
   }
 
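The expected index changes from 0 to 4 because frame-slot numbering is unified in this version: spill indices now count the fixed portion of the standard frame as well, which on a 32-bit target comprises four pointer-sized slots (return address, saved frame pointer, context, and function marker; compare the spill_index += StandardFrameConstants::kFixedSlotCount hunk below). Roughly:

    // Unified numbering on ia32 (pointer-sized slots):
    //   slots 0..3  fixed standard-frame slots
    //   slot  4     first dynamically allocated spill slot
    //               => DCHECK_EQ(alignment_state_index, 4)
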
@@ -1524,14 +1524,22 @@
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    HValue* h_right = instr->BetterRightOperand();
+    LOperand* right = UseOrConstant(h_right);
     LOperand* temp = NULL;
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
     LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    int constant_value =
+        h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+    // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+    // |DeoptimizeIf|.
+    bool needs_environment =
+        instr->CheckFlag(HValue::kCanOverflow) ||
+        (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+         (!right->IsConstantOperand() || constant_value <= 0));
+    if (needs_environment) {
       AssignEnvironment(mul);
     }
     return DefineSameAsFirst(mul);
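The |needs_environment| computation above narrows when an integer multiply must keep a deopt environment: under JS number semantics a minus-zero result is only possible if the constant right operand is zero or negative, so a known positive constant needs no bailout. Worked cases:

    //  0 * -5  ==> -0    constant <  0: deopt support still required
    // -7 *  0  ==> -0    constant == 0: deopt support still required
    //  0 *  5  ==> +0    constant >  0: -0 impossible, no environment
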
@@ -1701,14 +1709,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
@@ -1780,12 +1780,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2492,8 +2486,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2519,18 +2512,12 @@
       // The first local is saved at the end of the unoptimized frame.
       spill_index = graph()->osr()->UnoptimizedFrameSlots();
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LCallStub* result = new(zone()) LCallStub(context);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2680,16 +2667,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, esi), instr);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
index ab7a4b5..e22ab43 100644
--- a/src/crankshaft/ia32/lithium-ia32.h
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -24,7 +24,6 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
-  V(AllocateBlockContext)                    \
   V(Allocate)                                \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
@@ -39,7 +38,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
@@ -51,7 +49,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -105,7 +102,6 @@
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
@@ -455,19 +451,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -975,22 +958,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1140,8 +1107,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   LOperand* context() { return inputs_[0]; }
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1338,18 +1303,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1481,8 +1434,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2603,23 +2554,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index 5bd1e6a..c5b7e9c 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -154,7 +154,9 @@
 
 
 void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
-  masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.position);
+  SourcePosition position = deopt_info.position;
+  int raw_position = position.IsUnknown() ? 0 : position.raw();
+  masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position);
 }
 
 
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
index 82ad696..6776390 100644
--- a/src/crankshaft/lithium.cc
+++ b/src/crankshaft/lithium.cc
@@ -246,22 +246,9 @@
   stream->Add("}");
 }
 
-
-int StackSlotOffset(int index) {
-  if (index >= 0) {
-    // Local or spill slot. Skip the frame pointer, function, and
-    // context in the fixed part of the frame.
-    return -(index + 1) * kPointerSize -
-        StandardFrameConstants::kFixedFrameSizeFromFp;
-  } else {
-    // Incoming parameter. Skip the return address.
-    return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
-  }
-}
-
-
 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
-    : spill_slot_count_(0),
+    : base_frame_slots_(StandardFrameConstants::kFixedFrameSize / kPointerSize),
+      current_frame_slots_(base_frame_slots_),
       info_(info),
       graph_(graph),
       instructions_(32, info->zone()),
@@ -270,7 +257,6 @@
       deprecation_dependencies_(32, info->zone()),
       stability_dependencies_(8, info->zone()) {}
 
-
 LLabel* LChunk::GetLabel(int block_id) const {
   HBasicBlock* block = graph_->blocks()->at(block_id);
   int first_instruction = block->first_instruction_index();
@@ -495,9 +481,9 @@
   while (!iterator.Done()) {
     if (info()->saves_caller_doubles()) {
       if (kDoubleSize == kPointerSize * 2) {
-        spill_slot_count_ += 2;
+        current_frame_slots_ += 2;
       } else {
-        spill_slot_count_++;
+        current_frame_slots_++;
       }
     }
     iterator.Advance();
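Editorial note on the hunk above: the deleted StackSlotOffset() was the last helper that translated the old spill-only slot indices into fp-relative byte offsets; with LChunk now tracking base_frame_slots_ and current_frame_slots_, slot indices are expressed in whole-frame terms and callers go through FrameSlotToFPOffset() instead. A minimal standalone sketch of the retired arithmetic, with ia32-flavoured constants assumed purely for illustration (they are not taken from the real frames.h):

#include <cstdio>

// Assumed constants: 4-byte pointers, saved pc and fp above the frame
// pointer, context and function below it.
constexpr int kPointerSize = 4;
constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;
constexpr int kFPOnStackSize = kPointerSize;
constexpr int kPCOnStackSize = kPointerSize;

// Mirrors the deleted StackSlotOffset(): fp-relative byte offset of a slot.
int StackSlotOffset(int index) {
  if (index >= 0) {
    // Local or spill slot: skip the fixed part of the frame.
    return -(index + 1) * kPointerSize - kFixedFrameSizeFromFp;
  }
  // Incoming parameter: skip the saved fp and the return address.
  return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
}

int main() {
  printf("%d\n", StackSlotOffset(0));   // -12: first spill slot
  printf("%d\n", StackSlotOffset(-1));  // 8: first incoming parameter
  return 0;
}

The same unification explains the OSR hunks in this patch: spill_index now has StandardFrameConstants::kFixedSlotCount added to it, so unknown OSR values are addressed in the same whole-frame slot space as everything else.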
diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h
index 10e980e..5cfc0c3 100644
--- a/src/crankshaft/lithium.h
+++ b/src/crankshaft/lithium.h
@@ -638,7 +638,13 @@
 
   int ParameterAt(int index);
   int GetParameterStackSlot(int index) const;
-  int spill_slot_count() const { return spill_slot_count_; }
+  bool HasAllocatedStackSlots() const {
+    return current_frame_slots_ != base_frame_slots_;
+  }
+  int GetSpillSlotCount() const {
+    return current_frame_slots_ - base_frame_slots_;
+  }
+  int GetTotalFrameSlotCount() const { return current_frame_slots_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
   Isolate* isolate() const { return graph_->isolate(); }
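Stated as a sanity check rather than code from the patch: the new accessors are tied together by two invariants that follow directly from the LChunk constructor change in lithium.cc above,

  GetTotalFrameSlotCount() == base_frame_slots_ + GetSpillSlotCount()
  HasAllocatedStackSlots() == (GetSpillSlotCount() != 0)

so the NeedsEagerFrame() checks later in the patch can switch from GetStackSlotCount() > 0 to HasAllocatedStackSlots() without changing behaviour (spill counts are never negative).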
@@ -687,7 +693,8 @@
  protected:
   LChunk(CompilationInfo* info, HGraph* graph);
 
-  int spill_slot_count_;
+  int base_frame_slots_;
+  int current_frame_slots_;
 
  private:
   void CommitDependencies(Handle<Code> code) const;
@@ -757,8 +764,6 @@
 };
 
 
-int StackSlotOffset(int index);
-
 enum NumberUntagDMode {
   NUMBER_CANDIDATE_IS_SMI,
   NUMBER_CANDIDATE_IS_ANY_TAGGED
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index 2414f0d..8febb57 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -83,7 +83,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -127,13 +127,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ stop("stop_at");
-    }
-#endif
-
     // a1: Callee's JS function.
     // cp: Callee's context.
     // fp: Caller's frame pointer.
@@ -379,7 +372,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -552,7 +545,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()));
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to
     // the stack pointer.
@@ -564,7 +557,7 @@
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve the parameter without an eager stack frame, relative to
     // the stack pointer.
@@ -633,9 +626,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -645,9 +635,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -946,26 +933,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1411,8 +1378,7 @@
           if (constant < 0)  __ Subu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
           int32_t shift = WhichPowerOf2(constant_abs - 1);
-          __ sll(scratch, left, shift);
-          __ Addu(result, scratch, left);
+          __ Lsa(result, left, left, shift);
           // Correct the sign of the result if the constant is negative.
           if (constant < 0)  __ Subu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
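Many hunks in this file collapse a hand-written shift-then-add pair into the macro-assembler's Lsa helper. As a rough behavioural model, a sketch assuming the pre-R6 fallback is exactly the sll/addu pair the old code spelled out:

#include <cstdint>
#include <cstdio>

// Model of __ Lsa(rd, base, index, shift): rd = base + (index << shift).
// On MIPS32R6 this is a single lsa instruction; older cores get the same
// two-instruction sll + addu sequence that these hunks replace.
uint32_t Lsa(uint32_t base, uint32_t index, int shift) {
  return base + (index << shift);
}

int main() {
  // e.g. scaling an argument index by kPointerSizeLog2 (assumed 2 here):
  printf("0x%x\n", Lsa(0x1000, 3, 2));  // prints 0x100c
  return 0;
}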
@@ -1652,13 +1618,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                            LOperand* index,
                                            String::Encoding encoding) {
@@ -1804,8 +1763,14 @@
     __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
     // At this point, both left and right are either 0 or -0.
     if (operation == HMathMinMax::kMathMin) {
+      // The algorithm is -((-L) + (-R)), which, when L and R are in
+      // different registers, is most efficiently expressed as -((-L) - R).
       __ neg_d(left_reg, left_reg);
-      __ sub_d(result_reg, left_reg, right_reg);
+      if (left_reg.is(right_reg)) {
+        __ add_d(result_reg, left_reg, right_reg);
+      } else {
+        __ sub_d(result_reg, left_reg, right_reg);
+      }
       __ neg_d(result_reg, result_reg);
     } else {
       __ add_d(result_reg, left_reg, right_reg);
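The min-of-zeros path relies on IEEE 754 addition of signed zeros: (-0) + (-0) is -0 while (-0) + (+0) is +0, so summing the negations ORs the sign bits and the final negation restores a correctly signed minimum. A standalone check of the identity (plain C++, not V8 code):

#include <cmath>
#include <cstdio>

// min(L, R) over signed zeros via -((-L) + (-R)).
static double MinOfZeros(double l, double r) { return -((-l) + (-r)); }

int main() {
  printf("%d\n", std::signbit(MinOfZeros(+0.0, -0.0)));  // 1: result is -0
  printf("%d\n", std::signbit(MinOfZeros(-0.0, -0.0)));  // 1: result is -0
  printf("%d\n", std::signbit(MinOfZeros(+0.0, +0.0)));  // 0: result is +0
  return 0;
}

The add_d/sub_d split in the hunk guards the aliased case: once neg_d has negated left_reg, a subtract with left_reg.is(right_reg) would compute (-L) - (-L) = +0 regardless of sign, so the add form is kept when both operands live in the same register.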
@@ -1877,8 +1842,7 @@
   DCHECK(ToRegister(instr->right()).is(a0));
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
   // Other architectures use a nop here to signal that there is no inlined
   // patchable code. MIPS does not need the nop, since our marker
@@ -2159,8 +2123,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2235,32 +2200,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    DoubleRegister value = ToDoubleRegister(instr->value());
-    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
-    __ FmoveHigh(scratch, value);
-    __ li(at, 0x80000000);
-  } else {
-    Register value = ToRegister(instr->value());
-    __ CheckMap(value,
-                scratch,
-                Heap::kHeapNumberMapRootIndex,
-                instr->FalseLabel(chunk()),
-                DO_SMI_CHECK);
-    __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
-    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
-    __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-    __ mov(at, zero_reg);
-  }
-  EmitBranch(instr, eq, scratch, Operand(at));
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2530,8 +2469,7 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // On MIPS there is no need for a "no inlined smi code" marker (nop).
 
@@ -2577,8 +2515,7 @@
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi.
     __ SmiUntag(reg);
-    __ sll(at, reg, kPointerSizeLog2);
-    __ Addu(sp, sp, at);
+    __ Lsa(sp, sp, reg, kPointerSizeLog2);
   }
 
   __ Jump(ra);
@@ -2624,9 +2561,9 @@
 
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2725,10 +2662,10 @@
   // Name is always in a2.
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2780,8 +2717,7 @@
       Register index = ToRegister(instr->index());
       __ li(at, Operand(const_length + 1));
       __ Subu(result, at, index);
-      __ sll(at, result, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, result, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     }
   } else if (instr->index()->IsConstantOperand()) {
@@ -2790,12 +2726,10 @@
     int loc = const_index - 1;
     if (loc != 0) {
       __ Subu(result, length, Operand(loc));
-      __ sll(at, result, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, result, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     } else {
-      __ sll(at, length, kPointerSizeLog2);
-      __ Addu(at, arguments, at);
+      __ Lsa(at, arguments, length, kPointerSizeLog2);
       __ lw(result, MemOperand(at));
     }
   } else {
@@ -2803,8 +2737,7 @@
     Register index = ToRegister(instr->index());
     __ Subu(result, length, index);
     __ Addu(result, result, 1);
-    __ sll(at, result, kPointerSizeLog2);
-    __ Addu(at, arguments, at);
+    __ Lsa(at, arguments, result, kPointerSizeLog2);
     __ lw(result, MemOperand(at));
   }
 }
@@ -2883,6 +2816,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -2913,8 +2849,7 @@
     key = ToRegister(instr->key());
     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
         ? (element_size_shift - kSmiTagSize) : element_size_shift;
-    __ sll(at, key, shift_size);
-    __ Addu(scratch, scratch, at);
+    __ Lsa(scratch, scratch, key, shift_size);
   }
 
   __ ldc1(result, MemOperand(scratch));
@@ -2945,11 +2880,9 @@
     // during bound check elimination with the index argument to the bounds
     // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsSmi()) {
-      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
     } else {
-      __ sll(scratch, key, kPointerSizeLog2);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2);
     }
   }
   __ lw(result, MemOperand(store_base, offset));
@@ -3042,8 +2975,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3683,21 +3616,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(a1));
   DCHECK(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(a3));
     DCHECK(vector_register.is(a2));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ li(vector_register, vector);
     __ li(slot_register, Operand(Smi::FromInt(index)));
@@ -3944,8 +3878,7 @@
         address = external_pointer;
       }
     } else {
-      __ sll(address, key, shift_size);
-      __ Addu(address, external_pointer, address);
+      __ Lsa(address, external_pointer, key, shift_size);
     }
 
     if (elements_kind == FLOAT32_ELEMENTS) {
@@ -3985,6 +3918,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -4062,11 +3998,9 @@
     // during bound check elimination with the index argument to the bounds
     // check, which can be tagged, so that case must be handled here, too.
     if (instr->hydrogen()->key()->representation().IsSmi()) {
-      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
     } else {
-      __ sll(scratch, key, kPointerSizeLog2);
-      __ addu(scratch, elements, scratch);
+      __ Lsa(scratch, elements, key, kPointerSizeLog2);
     }
   }
   __ sw(value, MemOperand(store_base, offset));
@@ -4353,8 +4287,7 @@
   __ Branch(deferred->entry(), hi,
             char_code, Operand(String::kMaxOneByteCharCode));
   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
-  __ sll(scratch, char_code, kPointerSizeLog2);
-  __ Addu(result, result, scratch);
+  __ Lsa(result, result, char_code, kPointerSizeLog2);
   __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   __ Branch(deferred->entry(), eq, result, Operand(scratch));
@@ -5294,8 +5227,8 @@
     final_branch_condition = eq;
 
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
     // The first instruction of JumpIfSmi is an And - it is safe in the delay
     // slot.
     __ JumpIfSmi(input, false_label);
@@ -5482,19 +5415,10 @@
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   Register result = ToRegister(instr->result());
   Register object = ToRegister(instr->object());
-  __ And(at, object, kSmiTagMask);
-  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ GetObjectType(object, a1, a1);
-  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
-               Operand(JS_PROXY_TYPE));
 
   Label use_cache, call_runtime;
   DCHECK(object.is(a0));
-  Register null_value = t1;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(null_value, &call_runtime);
+  __ CheckEnumCache(&call_runtime);
 
   __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
   __ Branch(&use_cache);
@@ -5502,12 +5426,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(object);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  DCHECK(result.is(v0));
-  __ LoadRoot(at, Heap::kMetaMapRootIndex);
-  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5622,15 +5541,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ li(at, scope_info);
-  __ Push(at, ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
index 160ab9a..df72b2e 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -44,10 +44,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -153,7 +151,13 @@
                        Register temporary,
                        Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
index a9978e1..a7c5488 100644
--- a/src/crankshaft/mips/lithium-mips.cc
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -390,8 +390,8 @@
 
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot if needed for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
-  return spill_slot_count_++;
+  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+  return current_frame_slots_++;
 }
 
 
@@ -1690,14 +1690,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
@@ -1766,12 +1758,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2396,8 +2382,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2418,17 +2403,12 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2570,16 +2550,6 @@
   return new(zone()) LStoreFrameContext(context);
 }
 
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
index 880d243..8b36c5d 100644
--- a/src/crankshaft/mips/lithium-mips.h
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -21,7 +21,6 @@
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -35,7 +34,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
@@ -47,7 +45,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -101,7 +98,6 @@
   V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathExp)                                 \
   V(MathClz32)                               \
@@ -457,19 +453,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -970,22 +953,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1136,8 +1103,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
@@ -1324,18 +1289,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1461,8 +1414,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2575,23 +2526,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 29d19ee..ddf908d 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -58,7 +58,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -102,13 +102,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ stop("stop_at");
-    }
-#endif
-
     // a1: Callee's JS function.
     // cp: Callee's context.
     // fp: Caller's frame pointer.
@@ -362,7 +355,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -537,7 +530,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()));
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to
     // the stack pointer.
@@ -549,8 +542,8 @@
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
-    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
+    // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
   } else {
     // Retrieve the parameter without an eager stack frame, relative to
     // the stack pointer.
@@ -621,9 +614,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -633,9 +623,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -935,26 +922,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(v0));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1410,8 +1377,7 @@
           if (constant < 0) __ Dsubu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
           int32_t shift = WhichPowerOf2(constant_abs - 1);
-          __ dsll(scratch, left, shift);
-          __ Daddu(result, scratch, left);
+          __ Dlsa(result, left, left, shift);
           // Correct the sign of the result if the constant is negative.
           if (constant < 0) __ Dsubu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -1512,8 +1478,7 @@
           if (constant < 0) __ Subu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
           int32_t shift = WhichPowerOf2(constant_abs - 1);
-          __ sll(scratch, left, shift);
-          __ addu(result, scratch, left);
+          __ Lsa(result, left, left, shift);
           // Correct the sign of the result if the constant is negative.
           if (constant < 0) __ Subu(result, zero_reg, result);
         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -1749,13 +1714,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                            LOperand* index,
                                            String::Encoding encoding) {
@@ -1922,8 +1880,14 @@
     __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
     // At this point, both left and right are either 0 or -0.
     if (operation == HMathMinMax::kMathMin) {
+      // The algorithm is -((-L) + (-R)), which, when L and R are in
+      // different registers, is most efficiently expressed as -((-L) - R).
       __ neg_d(left_reg, left_reg);
-      __ sub_d(result_reg, left_reg, right_reg);
+      if (left_reg.is(right_reg)) {
+        __ add_d(result_reg, left_reg, right_reg);
+      } else {
+        __ sub_d(result_reg, left_reg, right_reg);
+      }
       __ neg_d(result_reg, result_reg);
     } else {
       __ add_d(result_reg, left_reg, right_reg);
@@ -1995,8 +1959,7 @@
   DCHECK(ToRegister(instr->right()).is(a0));
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
   // Other architectures use a nop here to signal that there is no inlined
   // patchable code. MIPS does not need the nop, since our marker
@@ -2277,8 +2240,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2353,35 +2317,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    DoubleRegister value = ToDoubleRegister(instr->value());
-    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
-    __ FmoveHigh(scratch, value);
-    // Only use low 32-bits of value.
-    __ dsll32(scratch, scratch, 0);
-    __ dsrl32(scratch, scratch, 0);
-    __ li(at, 0x80000000);
-  } else {
-    Register value = ToRegister(instr->value());
-    __ CheckMap(value,
-                scratch,
-                Heap::kHeapNumberMapRootIndex,
-                instr->FalseLabel(chunk()),
-                DO_SMI_CHECK);
-    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
-    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
-    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-    __ mov(at, zero_reg);
-  }
-  EmitBranch(instr, eq, scratch, Operand(at));
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2655,8 +2590,7 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // On MIPS there is no need for a "no inlined smi code" marker (nop).
 
@@ -2702,8 +2636,7 @@
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi.
     __ SmiUntag(reg);
-    __ dsll(at, reg, kPointerSizeLog2);
-    __ Daddu(sp, sp, at);
+    __ Dlsa(sp, sp, reg, kPointerSizeLog2);
   }
 
   __ Jump(ra);
@@ -2749,9 +2682,9 @@
 
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2865,10 +2798,10 @@
   // Name is always in a2.
   __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2920,8 +2853,7 @@
       Register index = ToRegister(instr->index());
       __ li(at, Operand(const_length + 1));
       __ Dsubu(result, at, index);
-      __ dsll(at, result, kPointerSizeLog2);
-      __ Daddu(at, arguments, at);
+      __ Dlsa(at, arguments, result, kPointerSizeLog2);
       __ ld(result, MemOperand(at));
     }
   } else if (instr->index()->IsConstantOperand()) {
@@ -2930,12 +2862,10 @@
     int loc = const_index - 1;
     if (loc != 0) {
       __ Dsubu(result, length, Operand(loc));
-      __ dsll(at, result, kPointerSizeLog2);
-      __ Daddu(at, arguments, at);
+      __ Dlsa(at, arguments, result, kPointerSizeLog2);
       __ ld(result, MemOperand(at));
     } else {
-      __ dsll(at, length, kPointerSizeLog2);
-      __ Daddu(at, arguments, at);
+      __ Dlsa(at, arguments, length, kPointerSizeLog2);
       __ ld(result, MemOperand(at));
     }
   } else {
@@ -2943,8 +2873,7 @@
     Register index = ToRegister(instr->index());
     __ Dsubu(result, length, index);
     __ Daddu(result, result, 1);
-    __ dsll(at, result, kPointerSizeLog2);
-    __ Daddu(at, arguments, at);
+    __ Dlsa(at, arguments, result, kPointerSizeLog2);
     __ ld(result, MemOperand(at));
   }
 }
@@ -3033,6 +2962,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3106,8 +3038,7 @@
       __ SmiScale(scratch, key, kPointerSizeLog2);
       __ daddu(scratch, elements, scratch);
     } else {
-      __ dsll(scratch, key, kPointerSizeLog2);
-      __ daddu(scratch, elements, scratch);
+      __ Dlsa(scratch, elements, key, kPointerSizeLog2);
     }
   }
 
@@ -3224,8 +3155,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3888,21 +3819,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(a1));
   DCHECK(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(a3));
     DCHECK(vector_register.is(a2));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ li(vector_register, vector);
     __ li(slot_register, Operand(Smi::FromInt(index)));
@@ -4208,6 +4140,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -4281,8 +4216,7 @@
       __ SmiScale(scratch, key, kPointerSizeLog2);
       __ daddu(store_base, elements, scratch);
     } else {
-      __ dsll(scratch, key, kPointerSizeLog2);
-      __ daddu(store_base, elements, scratch);
+      __ Dlsa(store_base, elements, key, kPointerSizeLog2);
     }
   }
 
@@ -4587,8 +4521,7 @@
   __ Branch(deferred->entry(), hi,
             char_code, Operand(String::kMaxOneByteCharCode));
   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
-  __ dsll(scratch, char_code, kPointerSizeLog2);
-  __ Daddu(result, result, scratch);
+  __ Dlsa(result, result, char_code, kPointerSizeLog2);
   __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   __ Branch(deferred->entry(), eq, result, Operand(scratch));
@@ -5499,8 +5432,8 @@
     final_branch_condition = eq;
 
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
     // The first instruction of JumpIfSmi is an And - it is safe in the delay
     // slot.
     __ JumpIfSmi(input, false_label);
@@ -5689,19 +5622,9 @@
   Register result = ToRegister(instr->result());
   Register object = ToRegister(instr->object());
 
-  __ And(at, object, kSmiTagMask);
-  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ GetObjectType(object, a1, a1);
-  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
-               Operand(JS_PROXY_TYPE));
-
   Label use_cache, call_runtime;
   DCHECK(object.is(a0));
-  Register null_value = a5;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(null_value, &call_runtime);
+  __ CheckEnumCache(&call_runtime);
 
   __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
   __ Branch(&use_cache);
@@ -5709,12 +5632,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(object);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  DCHECK(result.is(v0));
-  __ LoadRoot(at, Heap::kMetaMapRootIndex);
-  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5827,15 +5745,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ li(at, scope_info);
-  __ Push(at, ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
index efadb0f..2f1cefa 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -44,10 +44,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -155,7 +153,13 @@
                        Register temporary,
                        Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
index 129f615..b66e8ba 100644
--- a/src/crankshaft/mips64/lithium-mips64.cc
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -390,8 +390,8 @@
 
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot if needed for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
-  return spill_slot_count_++;
+  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+  return current_frame_slots_++;
 }
 
 
@@ -1696,14 +1696,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
@@ -1772,12 +1764,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2401,8 +2387,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2423,17 +2408,12 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2575,16 +2555,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
index 01dc234..8d2324f 100644
--- a/src/crankshaft/mips64/lithium-mips64.h
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -23,7 +23,6 @@
   V(AddI)                                    \
   V(AddS)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -37,7 +36,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
@@ -49,7 +47,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -103,7 +100,6 @@
   V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathExp)                                 \
   V(MathClz32)                               \
@@ -460,19 +456,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -988,22 +971,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1154,8 +1121,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
@@ -1356,18 +1321,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1523,8 +1476,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2621,23 +2572,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 936b8a7..921d9b6 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -60,7 +60,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -104,13 +104,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ stop("stop_at");
-    }
-#endif
-
     // r4: Callee's JS function.
     // cp: Callee's context.
     // pp: Callee's constant pool pointer (if enabled)
@@ -372,7 +365,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -524,7 +517,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()));
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -536,7 +529,7 @@
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -600,9 +593,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -612,9 +602,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -897,26 +884,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(cp));
-  DCHECK(ToRegister(instr->result()).is(r3));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1806,13 +1773,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
                                            String::Encoding encoding) {
   if (index->IsConstantOperand()) {
@@ -1974,14 +1934,18 @@
     __ bne(&return_left);  // left == right != 0.
 
     // At this point, both left and right are either 0 or -0.
-    // N.B. The following works because +0 + -0 == +0
     if (operation == HMathMinMax::kMathMin) {
-      // For min we want logical-or of sign bit: -(-L + -R)
+      // Min: The algorithm is -((-L) + (-R)), which, when L and R are in
+      // different registers, is most efficiently expressed as -((-L) - R).
       __ fneg(left_reg, left_reg);
-      __ fsub(result_reg, left_reg, right_reg);
+      if (left_reg.is(right_reg)) {
+        __ fadd(result_reg, left_reg, right_reg);
+      } else {
+        __ fsub(result_reg, left_reg, right_reg);
+      }
       __ fneg(result_reg, result_reg);
     } else {
-      // For max we want logical-and of sign bit: (L + R)
+      // Max: The following works because +0 + -0 == +0
       __ fadd(result_reg, left_reg, right_reg);
     }
     __ b(&done);
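
The min/max fast path above leans on IEEE-754 signed-zero identities: +0 + -0 == +0, so a plain add acts as a logical AND of the sign bits (max), while negate-add-negate acts as a logical OR (min). A minimal standalone sketch of both identities (plain C++ modelling the fneg/fadd/fsub sequence; only the +/-0 inputs that reach this path are exercised):

  #include <cassert>
  #include <cmath>

  // Model of the emitted sequence for min/max when both inputs are +/-0.
  double ZeroMax(double l, double r) { return l + r; }           // sign = sL & sR
  double ZeroMin(double l, double r) { return -((-l) + (-r)); }  // sign = sL | sR

  int main() {
    assert(!std::signbit(ZeroMax(0.0, -0.0)));  // max(+0, -0) == +0
    assert(std::signbit(ZeroMin(0.0, -0.0)));   // min(+0, -0) == -0
    assert(std::signbit(ZeroMin(-0.0, -0.0)));  // min(-0, -0) == -0
    return 0;
  }
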
@@ -2044,8 +2008,7 @@
   DCHECK(ToRegister(instr->right()).is(r3));
   DCHECK(ToRegister(instr->result()).is(r3));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2295,7 +2258,7 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val)
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                          ? instr->TrueDestination(chunk_)
                          : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
@@ -2388,46 +2351,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-  Register scratch = ToRegister(instr->temp());
-
-  if (rep.IsDouble()) {
-    DoubleRegister value = ToDoubleRegister(instr->value());
-    __ fcmpu(value, kDoubleRegZero);
-    EmitFalseBranch(instr, ne);
-#if V8_TARGET_ARCH_PPC64
-    __ MovDoubleToInt64(scratch, value);
-#else
-    __ MovDoubleHighToInt(scratch, value);
-#endif
-    __ cmpi(scratch, Operand::Zero());
-    EmitBranch(instr, lt);
-  } else {
-    Register value = ToRegister(instr->value());
-    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
-                instr->FalseLabel(chunk()), DO_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
-    __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
-    __ li(ip, Operand(1));
-    __ rotrdi(ip, ip, 1);  // ip = 0x80000000_00000000
-    __ cmp(scratch, ip);
-#else
-    __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
-    __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-    Label skip;
-    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
-    __ cmp(scratch, r0);
-    __ bne(&skip);
-    __ cmpi(ip, Operand::Zero());
-    __ bind(&skip);
-#endif
-    EmitBranch(instr, eq);
-  }
-}
-
-
 Condition LCodeGen::EmitIsString(Register input, Register temp1,
                                  Label* is_not_string,
                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
@@ -2688,8 +2611,7 @@
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals that no smi code was inlined.
   __ cmpi(r3, Operand::Zero());
@@ -2790,9 +2712,9 @@
 
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2907,10 +2829,10 @@
   // Name is always in r5.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3093,6 +3015,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3271,8 +3196,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3707,13 +3632,8 @@
   // If the input is +0.5, the result is 1.
   __ bgt(&convert);  // Out of [-0.5, +0.5].
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-#if V8_TARGET_ARCH_PPC64
-    __ MovDoubleToInt64(scratch1, input);
-#else
-    __ MovDoubleHighToInt(scratch1, input);
-#endif
-    __ cmpi(scratch1, Operand::Zero());
-    // [-0.5, -0].
+    // [-0.5, -0] (negative) yields minus zero.
+    __ TestDoubleSign(input, scratch1);
     DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   }
   __ fcmpu(input, dot_five);
@@ -3934,21 +3854,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->function()).is(r4));
   DCHECK(ToRegister(instr->result()).is(r3));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(r6));
     DCHECK(vector_register.is(r5));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ Move(vector_register, vector);
     __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
@@ -4278,6 +4199,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -4923,17 +4847,7 @@
     // load heap number
     __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
-#if V8_TARGET_ARCH_PPC64
-      __ MovDoubleToInt64(scratch, result_reg);
-      // rotate left by one for simple compare.
-      __ rldicl(scratch, scratch, 1, 0);
-      __ cmpi(scratch, Operand(1));
-#else
-      __ MovDoubleToInt64(scratch, ip, result_reg);
-      __ cmpi(ip, Operand::Zero());
-      __ bne(&done);
-      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
-#endif
+      __ TestDoubleIsMinusZero(result_reg, scratch, ip);
       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
     }
     __ b(&done);
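
TestDoubleIsMinusZero replaces the open-coded checks deleted above. The removed PPC64 path relied on -0.0 being the unique bit pattern 0x8000000000000000, so a rotate-left-by-one maps it (and nothing else) to 1. The same test in portable C++, as a hedged sketch (std::memcpy stands in for the bit cast; the helper name merely echoes the new macro-assembler call):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // -0.0 is the only double whose bit pattern is 0x8000000000000000.
  bool IsMinusZero(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    uint64_t rotated = (bits << 1) | (bits >> 63);  // rotate left by one
    return rotated == 1;
  }

  int main() {
    assert(IsMinusZero(-0.0));
    assert(!IsMinusZero(0.0));
    assert(!IsMinusZero(-1.0));
    return 0;
  }
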
@@ -5022,10 +4936,7 @@
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmpi(input_reg, Operand::Zero());
       __ bne(&done);
-      __ lwz(scratch1,
-             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
-                                           Register::kExponentOffset));
-      __ cmpwi(scratch1, Operand::Zero());
+      __ TestHeapNumberSign(scratch2, scratch1);
       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
     }
   }
@@ -5100,12 +5011,7 @@
       Label done;
       __ cmpi(result_reg, Operand::Zero());
       __ bne(&done);
-#if V8_TARGET_ARCH_PPC64
-      __ MovDoubleToInt64(scratch1, double_input);
-#else
-      __ MovDoubleHighToInt(scratch1, double_input);
-#endif
-      __ cmpi(scratch1, Operand::Zero());
+      __ TestDoubleSign(double_input, scratch1);
       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
       __ bind(&done);
     }
@@ -5130,12 +5036,7 @@
       Label done;
       __ cmpi(result_reg, Operand::Zero());
       __ bne(&done);
-#if V8_TARGET_ARCH_PPC64
-      __ MovDoubleToInt64(scratch1, double_input);
-#else
-      __ MovDoubleHighToInt(scratch1, double_input);
-#endif
-      __ cmpi(scratch1, Operand::Zero());
+      __ TestDoubleSign(double_input, scratch1);
       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
       __ bind(&done);
     }
@@ -5550,8 +5451,8 @@
     final_branch_condition = eq;
 
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
-    __ beq(true_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ beq(false_label);
     __ JumpIfSmi(input, false_label);
     // Check for undetectable objects => true.
     __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
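
The typeof "undefined" check now rejects null up front and then classifies by the undetectable map bit, instead of comparing against the undefined root directly. That ordering matters if, as this revision suggests, the null oddball's map also carries the undetectable bit: typeof null must remain "object", while undefined and document.all-style undetectables report "undefined". A toy model of the new branch order (field names are illustrative assumptions, not V8's API):

  #include <cassert>

  struct HeapValue {
    bool is_null;
    bool is_smi;
    bool map_is_undetectable;  // assumed set for undefined and undetectables
  };

  bool TypeofIsUndefined(const HeapValue& v) {
    if (v.is_null) return false;   // typeof null == "object"
    if (v.is_smi) return false;    // Smis are numbers
    return v.map_is_undetectable;  // undefined / undetectable objects
  }

  int main() {
    assert(TypeofIsUndefined({false, false, true}));   // undefined
    assert(!TypeofIsUndefined({true, false, true}));   // null, despite the bit
    assert(!TypeofIsUndefined({false, true, false}));  // a Smi
    return 0;
  }
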
@@ -5727,17 +5628,8 @@
 
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  __ TestIfSmi(r3, r0);
-  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
-
   Label use_cache, call_runtime;
-  Register null_value = r8;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(null_value, &call_runtime);
+  __ CheckEnumCache(&call_runtime);
 
   __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ b(&use_cache);
@@ -5745,12 +5637,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(r3);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
-  __ cmp(r4, ip);
-  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5862,15 +5749,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
index b0f016d..1b72bf8 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -44,7 +44,7 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() ||
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
            !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
@@ -141,7 +141,13 @@
                        Handle<String> class_name, Register input,
                        Register temporary, Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
index 63aead7..2a04d99 100644
--- a/src/crankshaft/ppc/lithium-ppc.cc
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -396,8 +396,8 @@
 
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot for a double-width slot.
-  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
-  return spill_slot_count_++;
+  if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+  return current_frame_slots_++;
 }
 
 
@@ -1706,14 +1706,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new (zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1782,12 +1774,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new (zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2402,8 +2388,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2424,17 +2409,12 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  return MarkAsCall(DefineFixed(new (zone()) LCallStub(context), r3), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2574,14 +2554,5 @@
   return new (zone()) LStoreFrameContext(context);
 }
 
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new (zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, cp), instr);
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
index e86edc9..0dfde05 100644
--- a/src/crankshaft/ppc/lithium-ppc.h
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -21,7 +21,6 @@
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -35,7 +34,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckNonSmi)                             \
@@ -47,7 +45,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -101,7 +98,6 @@
   V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
@@ -452,17 +448,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) { inputs_[0] = context; }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -954,22 +939,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1113,8 +1082,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
@@ -1314,16 +1281,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1444,8 +1401,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2500,23 +2455,6 @@
 };
 
 
-class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index df50f81..69d7efe 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -45,7 +45,7 @@
 
 Effect AstTyper::ObservedOnStack(Object* value) {
   Type* lower = Type::NowOf(value, zone());
-  return Effect(Bounds(lower, Type::Any(zone())));
+  return Effect(Bounds(lower, Type::Any()));
 }
 
 
@@ -393,7 +393,7 @@
 
 void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
   // TODO(rossberg): Reintroduce RegExp type.
-  NarrowType(expr, Bounds(Type::Object(zone())));
+  NarrowType(expr, Bounds(Type::Object()));
 }
 
 
@@ -421,7 +421,7 @@
     RECURSE(Visit(prop->value()));
   }
 
-  NarrowType(expr, Bounds(Type::Object(zone())));
+  NarrowType(expr, Bounds(Type::Object()));
 }
 
 
@@ -432,7 +432,7 @@
     RECURSE(Visit(value));
   }
 
-  NarrowType(expr, Bounds(Type::Object(zone())));
+  NarrowType(expr, Bounds(Type::Object()));
 }
 
 
@@ -485,7 +485,7 @@
   RECURSE(Visit(expr->exception()));
   // TODO(rossberg): is it worth having a non-termination effect?
 
-  NarrowType(expr, Bounds(Type::None(zone())));
+  NarrowType(expr, Bounds(Type::None()));
 }
 
 
@@ -569,7 +569,7 @@
     RECURSE(Visit(arg));
   }
 
-  NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
+  NarrowType(expr, Bounds(Type::None(), Type::Receiver()));
 }
 
 
@@ -596,13 +596,13 @@
   switch (expr->op()) {
     case Token::NOT:
     case Token::DELETE:
-      NarrowType(expr, Bounds(Type::Boolean(zone())));
+      NarrowType(expr, Bounds(Type::Boolean()));
       break;
     case Token::VOID:
-      NarrowType(expr, Bounds(Type::Undefined(zone())));
+      NarrowType(expr, Bounds(Type::Undefined()));
       break;
     case Token::TYPEOF:
-      NarrowType(expr, Bounds(Type::InternalizedString(zone())));
+      NarrowType(expr, Bounds(Type::InternalizedString()));
       break;
     default:
       UNREACHABLE();
@@ -624,7 +624,7 @@
 
   RECURSE(Visit(expr->expression()));
 
-  NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+  NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
 
   VariableProxy* proxy = expr->expression()->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -679,8 +679,8 @@
       RECURSE(Visit(expr->right()));
       Type* upper = Type::Union(
           expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
-      if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
-      Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+      if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
+      Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
       NarrowType(expr, Bounds(lower, upper));
       break;
     }
@@ -689,8 +689,7 @@
     case Token::SAR:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      NarrowType(expr,
-          Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
+      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Signed32()));
       break;
     case Token::SHR:
       RECURSE(Visit(expr->left()));
@@ -698,7 +697,7 @@
       // TODO(rossberg): The upper bound would be Unsigned32, but since there
       // is no 'positive Smi' type for the lower bound, we use the smallest
       // union of Smi and Unsigned32 as upper bound instead.
-      NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
       break;
     case Token::ADD: {
       RECURSE(Visit(expr->left()));
@@ -706,17 +705,19 @@
       Bounds l = expr->left()->bounds();
       Bounds r = expr->right()->bounds();
       Type* lower =
-          !l.lower->IsInhabited() || !r.lower->IsInhabited() ?
-              Type::None(zone()) :
-          l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
-              Type::String(zone()) :
-          l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
-              Type::SignedSmall(zone()) : Type::None(zone());
+          !l.lower->IsInhabited() || !r.lower->IsInhabited()
+              ? Type::None()
+              : l.lower->Is(Type::String()) || r.lower->Is(Type::String())
+                    ? Type::String()
+                    : l.lower->Is(Type::Number()) && r.lower->Is(Type::Number())
+                          ? Type::SignedSmall()
+                          : Type::None();
       Type* upper =
-          l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
-              Type::String(zone()) :
-          l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
-              Type::Number(zone()) : Type::NumberOrString(zone());
+          l.upper->Is(Type::String()) || r.upper->Is(Type::String())
+              ? Type::String()
+              : l.upper->Is(Type::Number()) && r.upper->Is(Type::Number())
+                    ? Type::Number()
+                    : Type::NumberOrString();
       NarrowType(expr, Bounds(lower, upper));
       break;
     }
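
The reflowed nested conditionals above encode the typing rule for '+': if either operand's upper bound is String the result concatenates, if both stay within Number the result stays numeric, and otherwise only NumberOrString can be promised. A compact sketch of the upper-bound rule over a hypothetical three-point lattice (standing in for V8's Type bounds):

  #include <cassert>

  enum class T { Number, String, NumberOrString };  // toy lattice, not V8's

  T AddUpperBound(T l, T r) {
    if (l == T::String || r == T::String) return T::String;  // concatenation
    if (l == T::Number && r == T::Number) return T::Number;  // numeric add
    return T::NumberOrString;                                // could be either
  }

  int main() {
    assert(AddUpperBound(T::Number, T::Number) == T::Number);
    assert(AddUpperBound(T::String, T::NumberOrString) == T::String);
    assert(AddUpperBound(T::NumberOrString, T::Number) == T::NumberOrString);
    return 0;
  }
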
@@ -726,7 +727,7 @@
     case Token::MOD:
       RECURSE(Visit(expr->left()));
       RECURSE(Visit(expr->right()));
-      NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+      NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
       break;
     default:
       UNREACHABLE();
@@ -748,11 +749,11 @@
   RECURSE(Visit(expr->left()));
   RECURSE(Visit(expr->right()));
 
-  NarrowType(expr, Bounds(Type::Boolean(zone())));
+  NarrowType(expr, Bounds(Type::Boolean()));
 }
 
 
-void AstTyper::VisitSpread(Spread* expr) { RECURSE(Visit(expr->expression())); }
+void AstTyper::VisitSpread(Spread* expr) { UNREACHABLE(); }
 
 
 void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
@@ -760,8 +761,7 @@
 }
 
 
-void AstTyper::VisitThisFunction(ThisFunction* expr) {
-}
+void AstTyper::VisitThisFunction(ThisFunction* expr) {}
 
 
 void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
@@ -770,8 +770,7 @@
 void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
 
 
-void AstTyper::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void AstTyper::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 3f7e9ba..849b4b3 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -65,7 +65,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
 }
@@ -118,13 +118,6 @@
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
-#endif
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
@@ -413,7 +406,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   DCHECK(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -514,7 +507,7 @@
   // representable as an Operand.
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return Operand(rbp, StackSlotOffset(op->index()));
+    return Operand(rbp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -582,9 +575,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -594,9 +584,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -913,26 +900,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(rsi));
-  DCHECK(ToRegister(instr->result()).is(rax));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1660,13 +1627,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 Operand LCodeGen::BuildSeqStringOperand(Register string,
                                         LOperand* index,
                                         String::Encoding encoding) {
@@ -1961,8 +1921,7 @@
   DCHECK(ToRegister(instr->right()).is(rax));
   DCHECK(ToRegister(instr->result()).is(rax));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2208,8 +2167,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2296,33 +2256,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-
-  if (rep.IsDouble()) {
-    XMMRegister value = ToDoubleRegister(instr->value());
-    XMMRegister xmm_scratch = double_scratch0();
-    __ Xorpd(xmm_scratch, xmm_scratch);
-    __ Ucomisd(xmm_scratch, value);
-    EmitFalseBranch(instr, not_equal);
-    __ Movmskpd(kScratchRegister, value);
-    __ testl(kScratchRegister, Immediate(1));
-    EmitBranch(instr, not_zero);
-  } else {
-    Register value = ToRegister(instr->value());
-    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
-    __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
-            Immediate(0x1));
-    EmitFalseBranch(instr, no_overflow);
-    __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
-            Immediate(0x00000000));
-    EmitBranch(instr, equal);
-  }
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2569,8 +2502,7 @@
   DCHECK(ToRegister(instr->context()).is(rsi));
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
@@ -2658,9 +2590,9 @@
 
   __ Move(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2775,10 +2707,10 @@
 
   __ Move(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2909,6 +2841,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3064,8 +2999,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3740,21 +3675,23 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(rsi));
   DCHECK(ToRegister(instr->function()).is(rdi));
   DCHECK(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(rdx));
     DCHECK(vector_register.is(rbx));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ Move(vector_register, vector);
     __ Move(slot_register, Smi::FromInt(index));
@@ -4107,6 +4044,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -5348,8 +5288,8 @@
     final_branch_condition = equal;
 
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
-    __ j(equal, true_label, true_distance);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ j(equal, false_label, false_distance);
     __ JumpIfSmi(input, false_label, false_distance);
     // Check for undetectable objects => true.
     __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
@@ -5519,17 +5459,8 @@
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
 
-  Condition cc = masm()->CheckSmi(rax);
-  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
-  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
-
   Label use_cache, call_runtime;
-  Register null_value = rdi;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ CheckEnumCache(null_value, &call_runtime);
+  __ CheckEnumCache(&call_runtime);
 
   __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
   __ jmp(&use_cache, Label::kNear);
@@ -5537,11 +5468,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ Push(rax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kMetaMapRootIndex);
-  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -5653,15 +5580,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ Push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
index 6fb918b..873a3dd 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -45,10 +45,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -128,7 +126,13 @@
                        Register temporary,
                        Register scratch);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index 3c932a2..6be4093 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -334,15 +334,15 @@
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) {
     // Skip a slot for a double-width slot on the x32 port.
-    spill_slot_count_++;
+    current_frame_slots_++;
     // The spill slot's address is rbp - (index + 1) * kPointerSize -
     // StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp is
     // 2 * kPointerSize. If rbp is aligned on an 8-byte boundary, the "|= 1"
     // below keeps the spilled doubles aligned on an 8-byte boundary.
     // TODO(haitao): ensure rbp is 8-byte aligned for the x32 port.
-    spill_slot_count_ |= 1;
+    current_frame_slots_ |= 1;
   }
-  return spill_slot_count_++;
+  return current_frame_slots_++;
 }
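
On the x32 port a double occupies two 4-byte slots, and the "|= 1" above forces the returned index odd so that the slot's rbp-relative address falls on an 8-byte boundary. A quick arithmetic check, assuming the offset formula quoted in the comment and an 8-byte-aligned rbp:

  #include <cassert>
  #include <cstdint>

  constexpr int kPointerSize = 4;                      // x32 slot size
  constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;

  uintptr_t SlotAddress(uintptr_t fp, int index) {
    return fp - (index + 1) * kPointerSize - kFixedFrameSizeFromFp;
  }

  int main() {
    uintptr_t fp = 0x1000;                // assume rbp is 8-byte aligned
    assert(SlotAddress(fp, 1) % 8 == 0);  // odd index: 8-byte aligned
    assert(SlotAddress(fp, 3) % 8 == 0);
    assert(SlotAddress(fp, 2) % 8 != 0);  // even index: only 4-byte aligned
    return 0;
  }
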
 
 
@@ -1511,10 +1511,18 @@
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    HValue* h_right = instr->BetterRightOperand();
+    LOperand* right = UseOrConstant(h_right);
     LMulI* mul = new(zone()) LMulI(left, right);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    int constant_value =
+        h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+    // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+    // |DeoptimizeIf|.
+    bool needs_environment =
+        instr->CheckFlag(HValue::kCanOverflow) ||
+        (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+         (!right->IsConstantOperand() || constant_value <= 0));
+    if (needs_environment) {
       AssignEnvironment(mul);
     }
     return DefineSameAsFirst(mul);
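
The new needs_environment test mirrors the deopt points in LCodeGen::DoMulI: besides overflow, a bailout is only reachable when a -0 result is possible, and with a constant right operand that requires the constant to be <= 0 (x * 0 yields -0 for negative x; 0 * c yields -0 for negative c; a positive constant can never produce -0 from an int32 left operand). The two -0 cases, checked with plain doubles standing in for the tagged values:

  #include <cassert>
  #include <cmath>

  int main() {
    assert(std::signbit(-5.0 * 0.0));  // x * 0 with x < 0 -> -0
    assert(std::signbit(0.0 * -3.0));  // 0 * c with c < 0 -> -0
    assert(!std::signbit(0.0 * 3.0));  // c > 0 never gives -0 here
    return 0;
  }
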
@@ -1694,13 +1702,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  return new(zone()) LCompareMinusZeroAndBranch(value);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
@@ -1773,12 +1774,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2485,8 +2480,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2507,18 +2501,12 @@
       Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LCallStub* result = new(zone()) LCallStub(context);
-  return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2669,16 +2657,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, rsi), instr);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
index ebe1ef9..406159b 100644
--- a/src/crankshaft/x64/lithium-x64.h
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -21,7 +21,6 @@
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
   V(Allocate)                                \
-  V(AllocateBlockContext)                    \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
   V(ArgumentsLength)                         \
@@ -35,7 +34,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
@@ -47,7 +45,6 @@
   V(ClampIToUint8)                           \
   V(ClampTToUint8)                           \
   V(ClassOfTestAndBranch)                    \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -101,7 +98,6 @@
   V(LoadKeyedGeneric)                        \
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
@@ -464,19 +460,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -968,20 +951,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LCompareMinusZeroAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1136,8 +1105,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   Token::Value op() const { return hydrogen()->token(); }
 };
 
@@ -1330,18 +1297,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1472,8 +1427,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2587,23 +2540,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index fe2baa5..a8f22be 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -20,7 +20,6 @@
 namespace v8 {
 namespace internal {
 
-
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
 class SafepointGenerator final : public CallWrapper {
@@ -75,7 +74,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   DCHECK(is_done());
-  code->set_stack_slots(GetStackSlotCount());
+  code->set_stack_slots(GetTotalFrameSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
@@ -100,13 +99,6 @@
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-      __ int3();
-    }
-#endif
-
     if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
       // Move state of dynamic frame alignment into edx.
       __ Move(edx, Immediate(kNoAlignmentPadding));
@@ -493,7 +485,7 @@
       masm()->nop();
     }
   }
-  safepoints_.Emit(masm(), GetStackSlotCount());
+  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
   return !is_aborted();
 }
 
@@ -846,7 +838,7 @@
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()));
+    return Operand(ebp, FrameSlotToFPOffset(op->index()));
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -858,7 +850,7 @@
 Operand LCodeGen::HighOperand(LOperand* op) {
   DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+    return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
   } else {
     // Retrieve the parameter without an eager stack frame, relative to the
     // stack pointer.
@@ -931,9 +923,6 @@
 
   if (op->IsStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     if (is_tagged) {
       translation->StoreStackSlot(index);
     } else if (is_uint32) {
@@ -943,9 +932,6 @@
     }
   } else if (op->IsDoubleStackSlot()) {
     int index = op->index();
-    if (index >= 0) {
-      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
-    }
     translation->StoreDoubleStackSlot(index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -1284,26 +1270,6 @@
 }
 
 
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  DCHECK(ToRegister(instr->context()).is(esi));
-  DCHECK(ToRegister(instr->result()).is(eax));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub(isolate());
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   GenerateOsrPrologue();
 }
@@ -1945,13 +1911,6 @@
 }
 
 
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->value());
-  __ EnumLength(result, map);
-}
-
-
 Operand LCodeGen::BuildSeqStringOperand(Register string,
                                         LOperand* index,
                                         String::Encoding encoding) {
@@ -2121,7 +2080,7 @@
       __ fstp_s(MemOperand(esp, 0));
       __ fstp_s(MemOperand(esp, kPointerSize));
       __ pop(scratch_reg);
-      __ xor_(MemOperand(esp, 0), scratch_reg);
+      __ or_(MemOperand(esp, 0), scratch_reg);
       X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
       __ pop(scratch_reg);  // restore esp
     } else {
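
Note the one-character fix above: xor_ became or_. For the min of two zeros the sign bits must combine with a logical OR (min(-0, -0) is -0); XOR would cancel the signs whenever both operands were -0, wrongly producing +0. In 32-bit float bit patterns:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kMinusZeroBits = 0x80000000u;  // bit pattern of -0.0f
    assert((kMinusZeroBits | kMinusZeroBits) == kMinusZeroBits);  // OR keeps -0
    assert((kMinusZeroBits ^ kMinusZeroBits) == 0u);              // XOR gives +0
    return 0;
  }
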
@@ -2200,8 +2159,7 @@
   DCHECK(ToRegister(instr->right()).is(eax));
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2443,8 +2401,9 @@
     // We can statically evaluate the comparison.
     double left_val = ToDouble(LConstantOperand::cast(left));
     double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
-        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
@@ -2518,29 +2477,6 @@
 }
 
 
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
-  Representation rep = instr->hydrogen()->value()->representation();
-  DCHECK(!rep.IsInteger32());
-
-  if (rep.IsDouble()) {
-    X87Register input = ToX87Register(instr->value());
-    X87LoadForUsage(input);
-    __ FXamMinusZero();
-    EmitBranch(instr, equal);
-  } else {
-    Register value = ToRegister(instr->value());
-    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
-    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
-           Immediate(0x1));
-    EmitFalseBranch(instr, no_overflow);
-    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
-           Immediate(0x00000000));
-    EmitBranch(instr, equal);
-  }
-}
-
-
 Condition LCodeGen::EmitIsString(Register input,
                                  Register temp1,
                                  Label* is_not_string,
@@ -2799,8 +2735,7 @@
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic =
-      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2924,9 +2859,9 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
-                                         SLOPPY, PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3035,10 +2970,10 @@
 
   __ mov(LoadDescriptor::NameRegister(), instr->name());
   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  Handle<Code> ic =
-      CodeFactory::LoadICInOptimizedCode(
-          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
-          instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+                        isolate(), NOT_INSIDE_TYPEOF,
+                        instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3150,6 +3085,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -3268,8 +3206,8 @@
   }
 
   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
-                        isolate(), instr->hydrogen()->language_mode(),
-                        instr->hydrogen()->initialization_state()).code();
+                        isolate(), instr->hydrogen()->initialization_state())
+                        .code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4010,21 +3948,22 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  HCallFunction* hinstr = instr->hydrogen();
   DCHECK(ToRegister(instr->context()).is(esi));
   DCHECK(ToRegister(instr->function()).is(edi));
   DCHECK(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
-  if (instr->hydrogen()->HasVectorAndSlot()) {
+  ConvertReceiverMode mode = hinstr->convert_mode();
+  if (hinstr->HasVectorAndSlot()) {
     Register slot_register = ToRegister(instr->temp_slot());
     Register vector_register = ToRegister(instr->temp_vector());
     DCHECK(slot_register.is(edx));
     DCHECK(vector_register.is(ebx));
 
     AllowDeferredHandleDereference vector_structure_check;
-    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
-    int index = vector->GetIndex(instr->hydrogen()->slot());
+    Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+    int index = vector->GetIndex(hinstr->slot());
 
     __ mov(vector_register, vector);
     __ mov(slot_register, Immediate(Smi::FromInt(index)));
@@ -4320,6 +4259,9 @@
       case DICTIONARY_ELEMENTS:
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
         UNREACHABLE();
         break;
     }
@@ -5713,8 +5655,8 @@
     final_branch_condition = equal;
 
   } else if (String::Equals(type_name, factory()->undefined_string())) {
-    __ cmp(input, factory()->undefined_value());
-    __ j(equal, true_label, true_distance);
+    __ cmp(input, factory()->null_value());
+    __ j(equal, false_label, false_distance);
     __ JumpIfSmi(input, false_label, false_distance);
     // Check for undetectable objects => true.
     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
@@ -5888,12 +5830,6 @@
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
-
-  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(&call_runtime);
@@ -5904,11 +5840,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(eax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  CallRuntime(Runtime::kForInEnumerate, instr);
   __ bind(&use_cache);
 }
 
@@ -6021,15 +5953,6 @@
 }
 
 
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
-  Handle<ScopeInfo> scope_info = instr->scope_info();
-  __ Push(scope_info);
-  __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kPushBlockContext, instr);
-  RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
 #undef __
 
 }  // namespace internal
diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h
index 6346344..0cfbf70 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/src/crankshaft/x87/lithium-codegen-x87.h
@@ -50,10 +50,8 @@
   }
 
   bool NeedsEagerFrame() const {
-    return GetStackSlotCount() > 0 ||
-        info()->is_non_deferred_calling() ||
-        !info()->IsStub() ||
-        info()->requires_frame();
+    return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
   }
   bool NeedsDeferredFrame() const {
     return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -165,7 +163,13 @@
                        Register temporary,
                        Register temporary2);
 
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  bool HasAllocatedStackSlots() const {
+    return chunk()->HasAllocatedStackSlots();
+  }
+  int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+  int GetTotalFrameSlotCount() const {
+    return chunk()->GetTotalFrameSlotCount();
+  }
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
index b422e12..f770509 100644
--- a/src/crankshaft/x87/lithium-x87.cc
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -355,11 +355,11 @@
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot when allocating a double-width slot, to keep it aligned.
   if (kind == DOUBLE_REGISTERS) {
-    spill_slot_count_++;
-    spill_slot_count_ |= 1;
+    current_frame_slots_++;
+    current_frame_slots_ |= 1;
     num_double_slots_++;
   }
-  return spill_slot_count_++;
+  return current_frame_slots_++;
 }
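Beyond the spill_slot_count_ → current_frame_slots_ rename, GetNextSpillIndex encodes an alignment rule: a double takes two slots, and the |= 1 forces the returned index to be odd so the pair sits 8-byte aligned in the frame, wasting at most one padding slot. A hedged sketch of the same allocation logic in isolation (names are illustrative, not V8's):

    #include <cstdio>

    struct SpillAllocator {
      int current_frame_slots_ = 0;

      // Mirrors LPlatformChunk::GetNextSpillIndex: a double-width value
      // consumes two slots, and |= 1 makes its returned index odd so the
      // pair stays aligned; at most one padding slot is skipped.
      int Next(bool is_double) {
        if (is_double) {
          current_frame_slots_++;
          current_frame_slots_ |= 1;
        }
        return current_frame_slots_++;
      }
    };

    int main() {
      SpillAllocator a;
      printf("%d\n", a.Next(false));  // 0
      printf("%d\n", a.Next(true));   // 3 (always odd for doubles)
      printf("%d\n", a.Next(false));  // 4
    }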
 
 
@@ -448,7 +448,7 @@
   // Reserve the first spill slot for the state of dynamic alignment.
   if (info()->IsOptimizing()) {
     int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    DCHECK_EQ(alignment_state_index, 0);
+    DCHECK_EQ(alignment_state_index, 4);
     USE(alignment_state_index);
   }
 
@@ -1534,14 +1534,22 @@
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
-    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    HValue* h_right = instr->BetterRightOperand();
+    LOperand* right = UseOrConstant(h_right);
     LOperand* temp = NULL;
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
     LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    int constant_value =
+        h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+    // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+    // |DeoptimizeIf|.
+    bool needs_environment =
+        instr->CheckFlag(HValue::kCanOverflow) ||
+        (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+         (!right->IsConstantOperand() || constant_value <= 0));
+    if (needs_environment) {
       AssignEnvironment(mul);
     }
     return DefineSameAsFirst(mul);
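The narrowed needs_environment condition tracks when DoMulI can actually deoptimize: an Integer32 multiply only produces -0 when one factor is zero and the other is negative, so a constant right operand greater than zero can never hit the minus-zero bailout, and the instruction then needs no environment. A quick illustration (doubles stand in for the -0 result the bailout guards against):

    #include <cstdio>

    int main() {
      // kBailoutOnMinusZero exists because JS distinguishes 0 from -0; an
      // Integer32 multiply must deopt when the true result would be -0.
      printf("%g\n", 0.0 * -5.0);  // -0: possible when the constant is < 0
      printf("%g\n", -4.0 * 0.0);  // -0: constant 0 still needs the deopt,
                                   //     hence constant_value <= 0 above
      printf("%g\n", 0.0 * 5.0);   //  0: a constant > 0 never yields -0, so
                                   //     no environment is assigned
    }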
@@ -1707,13 +1715,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
-    HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new (zone()) LCompareMinusZeroAndBranch(value);
-}
-
-
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
   DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
@@ -1785,12 +1786,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
-  LOperand* map = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
 LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
   LOperand* string = UseRegisterAtStart(instr->string());
   LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2496,8 +2491,7 @@
     return DefineAsSpilled(result, spill_index);
   } else {
     DCHECK(info()->IsStub());
-    CallInterfaceDescriptor descriptor =
-        info()->code_stub()->GetCallInterfaceDescriptor();
+    CallInterfaceDescriptor descriptor = graph()->descriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
@@ -2523,18 +2517,12 @@
       // The first local is saved at the end of the unoptimized frame.
       spill_index = graph()->osr()->UnoptimizedFrameSlots();
     }
+    spill_index += StandardFrameConstants::kFixedSlotCount;
   }
   return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
 }
 
 
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LCallStub* result = new(zone()) LCallStub(context);
-  return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
   // There are no real uses of the arguments object.
   // arguments.length and element access are supported directly on
@@ -2684,16 +2672,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
-    HAllocateBlockContext* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LAllocateBlockContext* result =
-      new(zone()) LAllocateBlockContext(context, function);
-  return MarkAsCall(DefineFixed(result, esi), instr);
-}
-
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
index e033902..0f2813f 100644
--- a/src/crankshaft/x87/lithium-x87.h
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -24,7 +24,6 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
   V(AccessArgumentsAt)                       \
   V(AddI)                                    \
-  V(AllocateBlockContext)                    \
   V(Allocate)                                \
   V(ApplyArguments)                          \
   V(ArgumentsElements)                       \
@@ -39,7 +38,6 @@
   V(CallFunction)                            \
   V(CallNewArray)                            \
   V(CallRuntime)                             \
-  V(CallStub)                                \
   V(CheckArrayBufferNotNeutered)             \
   V(CheckInstanceType)                       \
   V(CheckMaps)                               \
@@ -52,7 +50,6 @@
   V(ClampTToUint8NoSSE2)                     \
   V(ClassOfTestAndBranch)                    \
   V(ClobberDoubles)                          \
-  V(CompareMinusZeroAndBranch)               \
   V(CompareNumericAndBranch)                 \
   V(CmpObjectEqAndBranch)                    \
   V(CmpHoleAndBranch)                        \
@@ -106,7 +103,6 @@
   V(LoadNamedField)                          \
   V(LoadNamedGeneric)                        \
   V(LoadRoot)                                \
-  V(MapEnumLength)                           \
   V(MathAbs)                                 \
   V(MathClz32)                               \
   V(MathExp)                                 \
@@ -471,19 +467,6 @@
 };
 
 
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallStub(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -991,18 +974,6 @@
 };
 
 
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
- public:
-  explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
-                               "cmp-minus-zero-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
 class LIsStringAndBranch final : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1152,8 +1123,6 @@
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
-  Strength strength() { return hydrogen()->strength(); }
-
   LOperand* context() { return inputs_[0]; }
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1344,18 +1313,6 @@
 };
 
 
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMapEnumLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
 class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1489,8 +1446,6 @@
 
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  Strength strength() { return hydrogen()->strength(); }
-
  private:
   Token::Value op_;
 };
@@ -2617,23 +2572,6 @@
 };
 
 
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAllocateBlockContext(LOperand* context, LOperand* function) {
-    inputs_[0] = context;
-    inputs_[1] = function;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-
-  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
 class LChunkBuilder;
 class LPlatformChunk final : public LChunk {
  public:
diff --git a/src/d8.cc b/src/d8.cc
index c58c172..7c9a24f 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -26,15 +26,12 @@
 #include "include/v8-testing.h"
 #endif  // V8_SHARED
 
-#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
-#include "src/gdb-jit.h"
-#endif
-
 #ifdef ENABLE_VTUNE_JIT_INTERFACE
 #include "src/third_party/vtune/v8-vtune.h"
 #endif
 
 #include "src/d8.h"
+#include "src/ostreams.h"
 
 #include "include/libplatform/libplatform.h"
 #ifndef V8_SHARED
@@ -375,6 +372,7 @@
                           bool report_exceptions, SourceType source_type) {
   HandleScope handle_scope(isolate);
   TryCatch try_catch(isolate);
+  try_catch.SetVerbose(true);
 
   MaybeLocal<Value> maybe_result;
   {
@@ -1247,6 +1245,10 @@
   return global_template;
 }
 
+static void EmptyMessageCallback(Local<Message> message, Local<Value> error) {
+  // Nothing to be done here; exceptions thrown up to the shell are reported
+  // separately by {Shell::ReportException} after they are caught.
+}
 
 void Shell::Initialize(Isolate* isolate) {
 #ifndef V8_SHARED
@@ -1254,6 +1256,8 @@
   if (i::StrLength(i::FLAG_map_counters) != 0)
     MapCounters(isolate, i::FLAG_map_counters);
 #endif  // !V8_SHARED
+  // Disable default message reporting.
+  isolate->AddMessageListener(EmptyMessageCallback);
 }
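The two pieces above work together: try_catch.SetVerbose(true) makes V8 route messages for caught exceptions to the isolate's message listeners, and the empty listener suppresses the default reporting, leaving the shell's own {Shell::ReportException} as the single reporting path. A minimal embedder sketch of that wiring, assuming an already-initialized isolate and context (the function and callback names are illustrative):

    #include "include/v8.h"

    // Swallow V8's default message output; the embedder reports exceptions
    // itself after catching them through a verbose TryCatch.
    static void IgnoreMessage(v8::Local<v8::Message> message,
                              v8::Local<v8::Value> error) {}

    void RunWithOwnReporting(v8::Isolate* isolate,
                             v8::Local<v8::Context> context,
                             v8::Local<v8::Script> script) {
      isolate->AddMessageListener(IgnoreMessage);
      v8::TryCatch try_catch(isolate);
      try_catch.SetVerbose(true);  // still funnels messages to the listener
      if (script->Run(context).IsEmpty()) {
        // Report via try_catch.Exception() / try_catch.Message() here.
      }
    }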
 
 
@@ -2459,11 +2463,6 @@
     Shell::array_buffer_allocator = &shell_array_buffer_allocator;
   }
   create_params.array_buffer_allocator = Shell::array_buffer_allocator;
-#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
-  if (i::FLAG_gdbjit) {
-    create_params.code_event_handler = i::GDBJITInterface::EventHandler;
-  }
-#endif
 #ifdef ENABLE_VTUNE_JIT_INTERFACE
   create_params.code_event_handler = vTune::GetVtuneCodeEventHandler();
 #endif
diff --git a/src/debug/arm/debug-arm.cc b/src/debug/arm/debug-arm.cc
index 2d4cbf1..5fdda4f 100644
--- a/src/debug/arm/debug-arm.cc
+++ b/src/debug/arm/debug-arm.cc
@@ -62,6 +62,10 @@
   patcher.masm()->blx(ip);
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instr current_instr = Assembler::instr_at(pc);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/arm64/debug-arm64.cc b/src/debug/arm64/debug-arm64.cc
index c2b60a9..3e4b67c 100644
--- a/src/debug/arm64/debug-arm64.cc
+++ b/src/debug/arm64/debug-arm64.cc
@@ -74,6 +74,10 @@
   patcher.blr(ip0);
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instruction* current_instr = reinterpret_cast<Instruction*>(pc);
+  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index e19b93e..8114c21 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -111,7 +111,7 @@
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
   if (result->IsJSGlobalProxy()) {
-    PrototypeIterator iter(isolate, result);
+    PrototypeIterator iter(isolate, Handle<JSGlobalProxy>::cast(result));
     // TODO(verwaest): This will crash when the global proxy is detached.
     result = PrototypeIterator::GetCurrent<JSObject>(iter);
   }
@@ -128,7 +128,7 @@
       inlined_jsframe_index_(inlined_jsframe_index) {
   FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
   Handle<JSFunction> local_function =
-      handle(JSFunction::cast(frame_inspector.GetFunction()));
+      Handle<JSFunction>::cast(frame_inspector.GetFunction());
   Handle<Context> outer_context(local_function->context());
   native_context_ = Handle<Context>(outer_context->native_context());
   Handle<JSFunction> global_function(native_context_->closure());
@@ -302,8 +302,7 @@
   if (maybe.FromJust()) return;
 
   // FunctionGetArguments can't throw an exception.
-  Handle<JSObject> arguments =
-      Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+  Handle<JSObject> arguments = Accessors::FunctionGetArguments(function);
   Handle<String> arguments_str = isolate_->factory()->arguments_string();
   JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
                                            NONE)
@@ -347,6 +346,7 @@
       // within debug-evaluate.
       continue;
     }
+    if (value->IsTheHole()) continue;  // Value is not initialized yet (in TDZ).
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
   }
 }
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index 012d291..25634be 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -15,6 +15,7 @@
   has_adapted_arguments_ = frame_->has_adapted_arguments();
   is_bottommost_ = inlined_jsframe_index == 0;
   is_optimized_ = frame_->is_optimized();
+  is_interpreted_ = frame_->is_interpreted();
   // Calculate the deoptimized frame.
   if (frame->is_optimized()) {
     // TODO(turbofan): Revisit once we support deoptimization.
@@ -44,33 +45,41 @@
                        : frame_->ComputeParametersCount();
 }
 
-
-Object* FrameInspector::GetFunction() {
-  return is_optimized_ ? deoptimized_frame_->GetFunction() : frame_->function();
+Handle<Object> FrameInspector::GetFunction() {
+  return is_optimized_ ? deoptimized_frame_->GetFunction()
+                       : handle(frame_->function(), isolate_);
 }
 
-
-Object* FrameInspector::GetParameter(int index) {
+Handle<Object> FrameInspector::GetParameter(int index) {
   return is_optimized_ ? deoptimized_frame_->GetParameter(index)
-                       : frame_->GetParameter(index);
+                       : handle(frame_->GetParameter(index), isolate_);
 }
 
-
-Object* FrameInspector::GetExpression(int index) {
+Handle<Object> FrameInspector::GetExpression(int index) {
   // TODO(turbofan): Revisit once we support deoptimization.
   if (frame_->LookupCode()->is_turbofanned() &&
       frame_->function()->shared()->asm_function() &&
       !FLAG_turbo_asm_deoptimization) {
-    return isolate_->heap()->undefined_value();
+    return isolate_->factory()->undefined_value();
   }
   return is_optimized_ ? deoptimized_frame_->GetExpression(index)
-                       : frame_->GetExpression(index);
+                       : handle(frame_->GetExpression(index), isolate_);
 }
 
 
 int FrameInspector::GetSourcePosition() {
-  return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
-                       : frame_->LookupCode()->SourcePosition(frame_->pc());
+  if (is_optimized_) {
+    return deoptimized_frame_->GetSourcePosition();
+  } else if (is_interpreted_) {
+    InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
+    BytecodeArray* bytecode_array =
+        frame->function()->shared()->bytecode_array();
+    return bytecode_array->SourcePosition(frame->GetBytecodeOffset());
+  } else {
+    Code* code = frame_->LookupCode();
+    int offset = static_cast<int>(frame_->pc() - code->instruction_start());
+    return code->SourcePosition(offset);
+  }
 }
 
 
@@ -80,9 +89,9 @@
              : frame_->IsConstructor();
 }
 
-
-Object* FrameInspector::GetContext() {
-  return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
+Handle<Object> FrameInspector::GetContext() {
+  return is_optimized_ ? deoptimized_frame_->GetContext()
+                       : handle(frame_->context(), isolate_);
 }
 
 
@@ -92,6 +101,7 @@
   DCHECK(has_adapted_arguments_);
   frame_ = frame;
   is_optimized_ = frame_->is_optimized();
+  is_interpreted_ = frame_->is_interpreted();
   DCHECK(!is_optimized_);
 }
 
@@ -109,10 +119,10 @@
     Handle<String> name(scope_info->ParameterName(i));
     if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
 
-    Handle<Object> value(i < GetParametersCount()
-                             ? GetParameter(i)
-                             : isolate_->heap()->undefined_value(),
-                         isolate_);
+    Handle<Object> value =
+        i < GetParametersCount()
+            ? GetParameter(i)
+            : Handle<Object>::cast(isolate_->factory()->undefined_value());
     DCHECK(!value->IsTheHole());
 
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
@@ -122,8 +132,7 @@
   for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
     if (scope_info->LocalIsSynthetic(i)) continue;
     Handle<String> name(scope_info->StackLocalName(i));
-    Handle<Object> value(GetExpression(scope_info->StackLocalIndex(i)),
-                         isolate_);
+    Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
     if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
 
     JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
diff --git a/src/debug/debug-frames.h b/src/debug/debug-frames.h
index c0d20bb..c04fd2b 100644
--- a/src/debug/debug-frames.h
+++ b/src/debug/debug-frames.h
@@ -21,12 +21,12 @@
   ~FrameInspector();
 
   int GetParametersCount();
-  Object* GetFunction();
-  Object* GetParameter(int index);
-  Object* GetExpression(int index);
+  Handle<Object> GetFunction();
+  Handle<Object> GetParameter(int index);
+  Handle<Object> GetExpression(int index);
   int GetSourcePosition();
   bool IsConstructor();
-  Object* GetContext();
+  Handle<Object> GetContext();
 
   JavaScriptFrame* GetArgumentsFrame() { return frame_; }
   void SetArgumentsFrame(JavaScriptFrame* frame);
@@ -48,6 +48,7 @@
   DeoptimizedFrameInfo* deoptimized_frame_;
   Isolate* isolate_;
   bool is_optimized_;
+  bool is_interpreted_;
   bool is_bottommost_;
   bool has_adapted_arguments_;
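Moving GetFunction, GetParameter, GetExpression and GetContext from raw Object* to Handle<Object> is a GC-safety change, not just plumbing: a raw Object* is a direct heap pointer that a moving collection can invalidate between the accessor call and the use, while a handle is an indirection slot the GC rewrites. A toy model of the difference (std::vector reallocation standing in for a moving GC; none of this is V8 API):

    #include <cstdio>
    #include <vector>

    // An index-based "handle" re-resolves through the container, so it
    // survives relocation; a raw element pointer dangles.
    struct Handle {
      std::vector<int>* heap;
      size_t index;
      int& operator*() const { return (*heap)[index]; }
    };

    int main() {
      std::vector<int> heap = {42};
      int* raw = &heap[0];        // like Object*: direct pointer
      Handle handle{&heap, 0};    // like Handle<Object>: indirection
      heap.resize(100000);        // "the GC moves the object"
      // *raw is now undefined behavior; the handle still resolves.
      printf("%d\n", *handle);    // 42
      (void)raw;
    }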
 
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index 15a0594..e785384 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -28,7 +28,7 @@
     return;
   }
 
-  context_ = Handle<Context>(Context::cast(frame_inspector->GetContext()));
+  context_ = Handle<Context>::cast(frame_inspector->GetContext());
 
   // Catch the case when the debugger stops in an internal function.
   Handle<JSFunction> function = GetFunction();
@@ -58,12 +58,8 @@
     // return, which requires a debug info to be available.
     Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
 
-    // PC points to the instruction after the current one, possibly a break
-    // location as well. So the "- 1" to exclude it from the search.
-    Address call_pc = GetFrame()->pc() - 1;
-
     // Find the break point where execution has stopped.
-    BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+    BreakLocation location = BreakLocation::FromFrame(debug_info, GetFrame());
 
     ignore_nested_scopes = location.IsReturn();
   }
@@ -462,7 +458,8 @@
       isolate_->factory()->NewJSObject(isolate_->object_function());
   frame_inspector_->MaterializeStackLocals(local_scope, function);
 
-  Handle<Context> frame_context(Context::cast(frame_inspector_->GetContext()));
+  Handle<Context> frame_context =
+      Handle<Context>::cast(frame_inspector_->GetContext());
 
   HandleScope scope(isolate_);
   Handle<SharedFunctionInfo> shared(function->shared());
@@ -471,7 +468,7 @@
   if (!scope_info->HasContext()) return local_scope;
 
   // Third fill all context locals.
-  Handle<Context> function_context(frame_context->declaration_context());
+  Handle<Context> function_context(frame_context->closure_context());
   CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
 
   // Finally copy any properties from the function context extension.
@@ -480,8 +477,8 @@
       function_context->has_extension() &&
       !function_context->IsNativeContext()) {
     bool success = CopyContextExtensionToScopeObject(
-        handle(function_context->extension_object(), isolate_),
-        local_scope, JSReceiver::INCLUDE_PROTOS);
+        handle(function_context->extension_object(), isolate_), local_scope,
+        INCLUDE_PROTOS);
     if (!success) return MaybeHandle<JSObject>();
   }
 
@@ -510,8 +507,7 @@
   // be variables introduced by eval.
   if (context->has_extension()) {
     bool success = CopyContextExtensionToScopeObject(
-        handle(context->extension_object(), isolate_), closure_scope,
-        JSReceiver::OWN_ONLY);
+        handle(context->extension_object(), isolate_), closure_scope, OWN_ONLY);
     DCHECK(success);
     USE(success);
   }
@@ -559,8 +555,7 @@
     // Fill all extension variables.
     if (context->extension_object() != nullptr) {
       bool success = CopyContextExtensionToScopeObject(
-          handle(context->extension_object()), block_scope,
-          JSReceiver::OWN_ONLY);
+          handle(context->extension_object()), block_scope, OWN_ONLY);
       DCHECK(success);
       USE(success);
     }
@@ -798,10 +793,9 @@
   }
 }
 
-
 bool ScopeIterator::CopyContextExtensionToScopeObject(
     Handle<JSObject> extension, Handle<JSObject> scope_object,
-    JSReceiver::KeyCollectionType type) {
+    KeyCollectionType type) {
   Handle<FixedArray> keys;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index d4e335a..fbdf632 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -96,8 +96,7 @@
   }
 
   inline Handle<JSFunction> GetFunction() {
-    return Handle<JSFunction>(
-        JSFunction::cast(frame_inspector_->GetFunction()));
+    return Handle<JSFunction>::cast(frame_inspector_->GetFunction());
   }
 
   static bool InternalizedStringMatch(void* key1, void* key2) {
@@ -139,7 +138,7 @@
                                       Handle<JSObject> scope_object);
   bool CopyContextExtensionToScopeObject(Handle<JSObject> extension,
                                          Handle<JSObject> scope_object,
-                                         JSReceiver::KeyCollectionType type);
+                                         KeyCollectionType type);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index bd45b71..93c914c 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -16,6 +16,8 @@
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/global-handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/list.h"
 #include "src/log.h"
@@ -58,29 +60,39 @@
   return v8::Utils::ToLocal(native_context);
 }
 
-
-BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
-                             int position, int statement_position)
+BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
+                             int code_offset, int position,
+                             int statement_position)
     : debug_info_(debug_info),
-      pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
-      rmode_(rinfo->rmode()),
-      data_(rinfo->data()),
+      code_offset_(code_offset),
+      type_(type),
       position_(position),
       statement_position_(statement_position) {}
 
+BreakLocation::Iterator* BreakLocation::GetIterator(
+    Handle<DebugInfo> debug_info, BreakLocatorType type) {
+  if (debug_info->abstract_code()->IsBytecodeArray()) {
+    return new BytecodeArrayIterator(debug_info, type);
+  } else {
+    return new CodeIterator(debug_info, type);
+  }
+}
 
-BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info,
-                                  BreakLocatorType type)
+BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info)
     : debug_info_(debug_info),
-      reloc_iterator_(debug_info->code(), GetModeMask(type)),
       break_index_(-1),
       position_(1),
-      statement_position_(1) {
+      statement_position_(1) {}
+
+BreakLocation::CodeIterator::CodeIterator(Handle<DebugInfo> debug_info,
+                                          BreakLocatorType type)
+    : Iterator(debug_info),
+      reloc_iterator_(debug_info->abstract_code()->GetCode(),
+                      GetModeMask(type)) {
   if (!Done()) Next();
 }
 
-
-int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
+int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
   int mask = 0;
   mask |= RelocInfo::ModeMask(RelocInfo::POSITION);
   mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
@@ -93,13 +105,11 @@
   return mask;
 }
 
-
-void BreakLocation::Iterator::Next() {
+void BreakLocation::CodeIterator::Next() {
   DisallowHeapAllocation no_gc;
   DCHECK(!Done());
 
-  // Iterate through reloc info for code and original code stopping at each
-  // breakable code target.
+  // Iterate through reloc info, stopping at each breakable code target.
   bool first = break_index_ == -1;
   while (!Done()) {
     if (!first) reloc_iterator_.next();
@@ -141,43 +151,154 @@
   break_index_++;
 }
 
-
-// Find the break point at the supplied address, or the closest one before
-// the address.
-BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
-                                         Address pc) {
-  Iterator it(debug_info, ALL_BREAK_LOCATIONS);
-  it.SkipTo(BreakIndexFromAddress(debug_info, pc));
-  return it.GetBreakLocation();
+BreakLocation BreakLocation::CodeIterator::GetBreakLocation() {
+  DebugBreakType type;
+  if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
+    type = DEBUG_BREAK_SLOT_AT_RETURN;
+  } else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
+    type = DEBUG_BREAK_SLOT_AT_CALL;
+  } else if (RelocInfo::IsDebuggerStatement(rmode())) {
+    type = DEBUGGER_STATEMENT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode())) {
+    type = DEBUG_BREAK_SLOT;
+  } else {
+    type = NOT_DEBUG_BREAK;
+  }
+  return BreakLocation(debug_info_, type, code_offset(), position(),
+                       statement_position());
 }
 
+BreakLocation::BytecodeArrayIterator::BytecodeArrayIterator(
+    Handle<DebugInfo> debug_info, BreakLocatorType type)
+    : Iterator(debug_info),
+      source_position_iterator_(
+          debug_info->abstract_code()->GetBytecodeArray()),
+      break_locator_type_(type),
+      start_position_(debug_info->shared()->start_position()) {
+  if (!Done()) Next();
+}
+
+void BreakLocation::BytecodeArrayIterator::Next() {
+  DisallowHeapAllocation no_gc;
+  DCHECK(!Done());
+  bool first = break_index_ == -1;
+  while (!Done()) {
+    if (!first) source_position_iterator_.Advance();
+    first = false;
+    if (Done()) return;
+    position_ = source_position_iterator_.source_position() - start_position_;
+    if (source_position_iterator_.is_statement()) {
+      statement_position_ = position_;
+    }
+    DCHECK(position_ >= 0);
+    DCHECK(statement_position_ >= 0);
+    break_index_++;
+
+    enum DebugBreakType type = GetDebugBreakType();
+    if (type == NOT_DEBUG_BREAK) continue;
+
+    if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
+
+    DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
+    if (type == DEBUG_BREAK_SLOT_AT_CALL ||
+        type == DEBUG_BREAK_SLOT_AT_RETURN) {
+      break;
+    }
+  }
+}
+
+BreakLocation::DebugBreakType
+BreakLocation::BytecodeArrayIterator::GetDebugBreakType() {
+  BytecodeArray* bytecode_array = debug_info_->original_bytecode_array();
+  interpreter::Bytecode bytecode =
+      interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+
+  if (bytecode == interpreter::Bytecode::kDebugger) {
+    return DEBUGGER_STATEMENT;
+  } else if (bytecode == interpreter::Bytecode::kReturn) {
+    return DEBUG_BREAK_SLOT_AT_RETURN;
+  } else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
+    return DEBUG_BREAK_SLOT_AT_CALL;
+  } else if (source_position_iterator_.is_statement()) {
+    return DEBUG_BREAK_SLOT;
+  } else {
+    return NOT_DEBUG_BREAK;
+  }
+}
+
+BreakLocation BreakLocation::BytecodeArrayIterator::GetBreakLocation() {
+  return BreakLocation(debug_info_, GetDebugBreakType(), code_offset(),
+                       position(), statement_position());
+}
 
 // Find the break point at the supplied code offset, or the closest one
 // before it.
-void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
-                                             Address pc,
-                                             List<BreakLocation>* result_out) {
-  int break_index = BreakIndexFromAddress(debug_info, pc);
-  Iterator it(debug_info, ALL_BREAK_LOCATIONS);
-  it.SkipTo(break_index);
-  int statement_position = it.statement_position();
-  while (!it.Done() && it.statement_position() == statement_position) {
-    result_out->Add(it.GetBreakLocation());
-    it.Next();
+BreakLocation BreakLocation::FromCodeOffset(Handle<DebugInfo> debug_info,
+                                            int offset) {
+  base::SmartPointer<Iterator> it(GetIterator(debug_info));
+  it->SkipTo(BreakIndexFromCodeOffset(debug_info, offset));
+  return it->GetBreakLocation();
+}
+
+FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
+  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+  frame->Summarize(&frames);
+  return frames.first();
+}
+
+int CallOffsetFromCodeOffset(int code_offset, bool is_interpreted) {
+  // Code offset points to the instruction after the call. Subtract 1 to
+  // exclude that instruction from the search. For bytecode, the code offset
+  // still points to the call.
+  return is_interpreted ? code_offset : code_offset - 1;
+}
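The asymmetry is that a FrameSummary for machine code reports the return address, one instruction past the call, while an interpreted frame's code offset names the call bytecode itself; the -1 normalizes the former for break-location lookup. Spelled out with illustrative offsets:

    #include <cassert>

    // Mirrors CallOffsetFromCodeOffset (the offsets below are made up).
    int CallOffset(int code_offset, bool is_interpreted) {
      return is_interpreted ? code_offset : code_offset - 1;
    }

    int main() {
      // Full codegen: a call at offset 17 is summarized as 18 (return addr).
      assert(CallOffset(18, /*is_interpreted=*/false) == 17);
      // Interpreter: the bytecode offset already points at the call.
      assert(CallOffset(17, /*is_interpreted=*/true) == 17);
    }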
+
+BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
+                                       JavaScriptFrame* frame) {
+  FrameSummary summary = GetFirstFrameSummary(frame);
+  int call_offset =
+      CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
+  return FromCodeOffset(debug_info, call_offset);
+}
+
+// Find all break locations in the same statement as the closest break
+// location at or before the supplied code offset.
+void BreakLocation::FromCodeOffsetSameStatement(
+    Handle<DebugInfo> debug_info, int offset, List<BreakLocation>* result_out) {
+  int break_index = BreakIndexFromCodeOffset(debug_info, offset);
+  base::SmartPointer<Iterator> it(GetIterator(debug_info));
+  it->SkipTo(break_index);
+  int statement_position = it->statement_position();
+  while (!it->Done() && it->statement_position() == statement_position) {
+    result_out->Add(it->GetBreakLocation());
+    it->Next();
   }
 }
 
 
-int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
-                                         Address pc) {
+void BreakLocation::AllForStatementPosition(Handle<DebugInfo> debug_info,
+                                            int statement_position,
+                                            List<BreakLocation>* result_out) {
+  for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
+       it->Next()) {
+    if (it->statement_position() == statement_position) {
+      result_out->Add(it->GetBreakLocation());
+    }
+  }
+}
+
+int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
+                                            int offset) {
   // Run through all break points to locate the one closest to the offset.
   int closest_break = 0;
   int distance = kMaxInt;
-  for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
+  DCHECK(0 <= offset && offset < debug_info->abstract_code()->Size());
+  for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
+       it->Next()) {
     // Check if this break point is closer than what was previously found.
-    if (it.pc() <= pc && pc - it.pc() < distance) {
-      closest_break = it.break_index();
-      distance = static_cast<int>(pc - it.pc());
+    if (it->code_offset() <= offset && offset - it->code_offset() < distance) {
+      closest_break = it->break_index();
+      distance = offset - it->code_offset();
       // Stop if we cannot get any closer.
       if (distance == 0) break;
     }
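BreakIndexFromCodeOffset is a closest-preceding search: among all break locations, take the one whose offset is at or before the query and minimizes the distance, defaulting to the first location. The same loop extracted over a plain vector (a sketch, not V8's iterator machinery):

    #include <climits>
    #include <cstdio>
    #include <vector>

    // Index of the break location closest to, and not after, |offset|.
    int ClosestPrecedingIndex(const std::vector<int>& break_offsets,
                              int offset) {
      int closest = 0;
      int distance = INT_MAX;
      for (int i = 0; i < static_cast<int>(break_offsets.size()); i++) {
        if (break_offsets[i] <= offset &&
            offset - break_offsets[i] < distance) {
          closest = i;
          distance = offset - break_offsets[i];
          if (distance == 0) break;  // cannot get any closer
        }
      }
      return closest;
    }

    int main() {
      std::vector<int> offsets = {0, 8, 20, 31};
      printf("%d\n", ClosestPrecedingIndex(offsets, 25));  // 2 (offset 20)
      printf("%d\n", ClosestPrecedingIndex(offsets, 8));   // 1 (exact hit)
    }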
@@ -191,28 +312,26 @@
                                           BreakPositionAlignment alignment) {
   // Run through all break points to locate the one closest to the source
   // position.
-  int closest_break = 0;
   int distance = kMaxInt;
-
-  for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
+  base::SmartPointer<Iterator> it(GetIterator(debug_info));
+  BreakLocation closest_break = it->GetBreakLocation();
+  while (!it->Done()) {
     int next_position;
     if (alignment == STATEMENT_ALIGNED) {
-      next_position = it.statement_position();
+      next_position = it->statement_position();
     } else {
       DCHECK(alignment == BREAK_POSITION_ALIGNED);
-      next_position = it.position();
+      next_position = it->position();
     }
     if (position <= next_position && next_position - position < distance) {
-      closest_break = it.break_index();
+      closest_break = it->GetBreakLocation();
       distance = next_position - position;
       // Stop if we cannot get any closer.
       if (distance == 0) break;
     }
+    it->Next();
   }
-
-  Iterator it(debug_info, ALL_BREAK_LOCATIONS);
-  it.SkipTo(closest_break);
-  return it.GetBreakLocation();
+  return closest_break;
 }
 
 
@@ -222,14 +341,14 @@
   if (!HasBreakPoint()) SetDebugBreak();
   DCHECK(IsDebugBreak() || IsDebuggerStatement());
   // Set the break point information.
-  DebugInfo::SetBreakPoint(debug_info_, pc_offset_, position_,
+  DebugInfo::SetBreakPoint(debug_info_, code_offset_, position_,
                            statement_position_, break_point_object);
 }
 
 
 void BreakLocation::ClearBreakPoint(Handle<Object> break_point_object) {
   // Clear the break point information.
-  DebugInfo::ClearBreakPoint(debug_info_, pc_offset_, break_point_object);
+  DebugInfo::ClearBreakPoint(debug_info_, code_offset_, break_point_object);
   // If there are no more break points here remove the debug break.
   if (!HasBreakPoint()) {
     ClearDebugBreak();
@@ -280,11 +399,23 @@
   if (IsDebugBreak()) return;
 
   DCHECK(IsDebugBreakSlot());
-  Isolate* isolate = debug_info_->GetIsolate();
-  Builtins* builtins = isolate->builtins();
-  Handle<Code> target =
-      IsReturn() ? builtins->Return_DebugBreak() : builtins->Slot_DebugBreak();
-  DebugCodegen::PatchDebugBreakSlot(isolate, pc(), target);
+  if (abstract_code()->IsCode()) {
+    Code* code = abstract_code()->GetCode();
+    DCHECK(code->kind() == Code::FUNCTION);
+    Builtins* builtins = isolate()->builtins();
+    Handle<Code> target = IsReturn() ? builtins->Return_DebugBreak()
+                                     : builtins->Slot_DebugBreak();
+    Address pc = code->instruction_start() + code_offset();
+    DebugCodegen::PatchDebugBreakSlot(isolate(), pc, target);
+  } else {
+    BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+    interpreter::Bytecode bytecode =
+        interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+    interpreter::Bytecode debugbreak =
+        interpreter::Bytecodes::GetDebugBreak(bytecode);
+    bytecode_array->set(code_offset(),
+                        interpreter::Bytecodes::ToByte(debugbreak));
+  }
   DCHECK(IsDebugBreak());
 }
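For bytecode, the patch/unpatch cycle is a plain byte swap: SetDebugBreak overwrites the instruction in the debug copy with its debug-break counterpart, and ClearDebugBreak restores the byte from the pristine original_bytecode_array kept on the DebugInfo. The same round trip over raw buffers (opcode values are made up for illustration):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    const uint8_t kAdd = 0x10;               // illustrative opcode
    const uint8_t kDebugBreakForAdd = 0x90;  // its debug-break counterpart

    int main() {
      std::vector<uint8_t> original = {kAdd, kAdd, kAdd};  // pristine copy
      std::vector<uint8_t> active = original;              // what executes

      int offset = 1;
      active[offset] = kDebugBreakForAdd;  // SetDebugBreak: patch in place
      assert(active[offset] != original[offset]);

      active[offset] = original[offset];   // ClearDebugBreak: restore byte
      assert(active == original);
    }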
 
@@ -294,7 +425,16 @@
   if (IsDebuggerStatement()) return;
 
   DCHECK(IsDebugBreakSlot());
-  DebugCodegen::ClearDebugBreakSlot(debug_info_->GetIsolate(), pc());
+  if (abstract_code()->IsCode()) {
+    Code* code = abstract_code()->GetCode();
+    DCHECK(code->kind() == Code::FUNCTION);
+    Address pc = code->instruction_start() + code_offset();
+    DebugCodegen::ClearDebugBreakSlot(isolate(), pc);
+  } else {
+    BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+    BytecodeArray* original = debug_info_->original_bytecode_array();
+    bytecode_array->set(code_offset(), original->get(code_offset()));
+  }
   DCHECK(!IsDebugBreak());
 }
 
@@ -302,15 +442,24 @@
 bool BreakLocation::IsDebugBreak() const {
   if (IsDebuggerStatement()) return false;
   DCHECK(IsDebugBreakSlot());
-  return rinfo().IsPatchedDebugBreakSlotSequence();
+  if (abstract_code()->IsCode()) {
+    Code* code = abstract_code()->GetCode();
+    DCHECK(code->kind() == Code::FUNCTION);
+    Address pc = code->instruction_start() + code_offset();
+    return DebugCodegen::DebugBreakSlotIsPatched(pc);
+  } else {
+    BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+    interpreter::Bytecode bytecode =
+        interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+    return interpreter::Bytecodes::IsDebugBreak(bytecode);
+  }
 }
 
 
 Handle<Object> BreakLocation::BreakPointObjects() const {
-  return debug_info_->GetBreakPointObjects(pc_offset_);
+  return debug_info_->GetBreakPointObjects(code_offset_);
 }
 
-
 void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
   uint32_t mask = 1 << feature;
   // Only count one sample per feature and isolate.
@@ -444,22 +593,16 @@
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
 
   // Find the break location where execution has stopped.
-  // PC points to the instruction after the current one, possibly a break
-  // location as well. So the "- 1" to exclude it from the search.
-  Address call_pc = frame->pc() - 1;
-  BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+  BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
 
   // Find actual break points, if any, and trigger debug break event.
-  if (break_points_active_ && location.HasBreakPoint()) {
-    Handle<Object> break_point_objects = location.BreakPointObjects();
-    Handle<Object> break_points_hit = CheckBreakPoints(break_point_objects);
-    if (!break_points_hit->IsUndefined()) {
-      // Clear all current stepping setup.
-      ClearStepping();
-      // Notify the debug event listeners.
-      OnDebugBreak(break_points_hit, false);
-      return;
-    }
+  Handle<Object> break_points_hit = CheckBreakPoints(&location);
+  if (!break_points_hit->IsUndefined()) {
+    // Clear all current stepping setup.
+    ClearStepping();
+    // Notify the debug event listeners.
+    OnDebugBreak(break_points_hit, false);
+    return;
   }
 
   // No break point. Check for stepping.
@@ -480,11 +623,14 @@
       // Step next should not break in a deeper frame.
       if (current_fp < target_fp) return;
     // Fall through.
-    case StepIn:
+    case StepIn: {
+      FrameSummary summary = GetFirstFrameSummary(frame);
+      int offset = summary.code_offset();
       step_break = location.IsReturn() || (current_fp != last_fp) ||
                    (thread_local_.last_statement_position_ !=
-                    location.code()->SourceStatementPosition(frame->pc()));
+                    location.abstract_code()->SourceStatementPosition(offset));
       break;
+    }
     case StepFrame:
       step_break = current_fp != last_fp;
       break;
@@ -503,12 +649,17 @@
 }
 
 
-// Check the break point objects for whether one or more are actually
-// triggered. This function returns a JSArray with the break point objects
-// which is triggered.
-Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+// Find break point objects for this location, if any, and evaluate them.
+// Return an array of break point objects that evaluated true.
+Handle<Object> Debug::CheckBreakPoints(BreakLocation* location,
+                                       bool* has_break_points) {
   Factory* factory = isolate_->factory();
+  bool has_break_points_to_check =
+      break_points_active_ && location->HasBreakPoint();
+  if (has_break_points) *has_break_points = has_break_points_to_check;
+  if (!has_break_points_to_check) return factory->undefined_value();
 
+  Handle<Object> break_point_objects = location->BreakPointObjects();
   // Count the number of break points hit. If there are multiple break points
   // they are in a FixedArray.
   Handle<FixedArray> break_points_hit;
@@ -518,9 +669,9 @@
     Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
     break_points_hit = factory->NewFixedArray(array->length());
     for (int i = 0; i < array->length(); i++) {
-      Handle<Object> o(array->get(i), isolate_);
-      if (CheckBreakPoint(o)) {
-        break_points_hit->set(break_points_hit_count++, *o);
+      Handle<Object> break_point_object(array->get(i), isolate_);
+      if (CheckBreakPoint(break_point_object)) {
+        break_points_hit->set(break_points_hit_count++, *break_point_object);
       }
     }
   } else {
@@ -529,25 +680,51 @@
       break_points_hit->set(break_points_hit_count++, *break_point_objects);
     }
   }
-
-  // Return undefined if no break points were triggered.
-  if (break_points_hit_count == 0) {
-    return factory->undefined_value();
-  }
-  // Return break points hit as a JSArray.
+  if (break_points_hit_count == 0) return factory->undefined_value();
   Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
   result->set_length(Smi::FromInt(break_points_hit_count));
   return result;
 }
 
 
+bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
+  // A break location is considered muted if the break locations on the
+  // current statement carry at least one break point, and all of those break
+  // points evaluate to false. Aside from not triggering a debug break event
+  // at the break location, we also do not trigger one for debugger
+  // statements, nor an exception event for exceptions thrown at this
+  // location.
+  Object* fun = frame->function();
+  if (!fun->IsJSFunction()) return false;
+  JSFunction* function = JSFunction::cast(fun);
+  if (!function->shared()->HasDebugInfo()) return false;
+  HandleScope scope(isolate_);
+  Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
+  // Enter the debugger.
+  DebugScope debug_scope(this);
+  if (debug_scope.failed()) return false;
+  BreakLocation current_position = BreakLocation::FromFrame(debug_info, frame);
+  List<BreakLocation> break_locations;
+  BreakLocation::AllForStatementPosition(
+      debug_info, current_position.statement_position(), &break_locations);
+  bool has_break_points_at_all = false;
+  for (int i = 0; i < break_locations.length(); i++) {
+    bool has_break_points;
+    Handle<Object> check_result =
+        CheckBreakPoints(&break_locations[i], &has_break_points);
+    has_break_points_at_all |= has_break_points;
+    if (has_break_points && !check_result->IsUndefined()) return false;
+  }
+  return has_break_points_at_all;
+}
+
+
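The return value folds a two-part condition: muted means the statement's break locations carry at least one break point and every one of them evaluated false; a single hit unmutes, and no break points at all means not muted either. Reduced to its boolean core (a sketch over plain data, not the BreakLocation types):

    #include <cstdio>
    #include <vector>

    struct LocationCheck {
      bool has_break_points;  // any break point installed here?
      bool any_hit;           // did one of them evaluate true?
    };

    // Mirrors the loop in IsMutedAtCurrentLocation.
    bool IsMuted(const std::vector<LocationCheck>& checks) {
      bool has_any = false;
      for (const LocationCheck& c : checks) {
        has_any |= c.has_break_points;
        if (c.has_break_points && c.any_hit) return false;  // a hit unmutes
      }
      return has_any;  // no break points at all: not muted
    }

    int main() {
      printf("%d\n", IsMuted({{true, false}, {false, false}}));  // 1: muted
      printf("%d\n", IsMuted({{true, true}}));                   // 0
      printf("%d\n", IsMuted({{false, false}}));                 // 0
    }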
 MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
                                         Handle<Object> args[]) {
   PostponeInterruptsScope no_interrupts(isolate_);
   AssertDebugContext();
   Handle<Object> holder = isolate_->natives_utils_object();
   Handle<JSFunction> fun = Handle<JSFunction>::cast(
-      Object::GetProperty(isolate_, holder, name, STRICT).ToHandleChecked());
+      Object::GetProperty(isolate_, holder, name).ToHandleChecked());
   Handle<Object> undefined = isolate_->factory()->undefined_value();
   return Execution::TryCall(isolate_, fun, undefined, argc, args);
 }
@@ -668,11 +845,8 @@
           Handle<BreakPointInfo>::cast(result);
       Handle<DebugInfo> debug_info = node->debug_info();
 
-      // Find the break point and clear it.
-      Address pc =
-          debug_info->code()->entry() + break_point_info->code_position();
-
-      BreakLocation location = BreakLocation::FromAddress(debug_info, pc);
+      BreakLocation location = BreakLocation::FromCodeOffset(
+          debug_info, break_point_info->code_offset());
       location.ClearBreakPoint(break_point_object);
 
       // If there are no more break points left remove the debug info for this
@@ -694,9 +868,10 @@
 void Debug::ClearAllBreakPoints() {
   for (DebugInfoListNode* node = debug_info_list_; node != NULL;
        node = node->next()) {
-    for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
-         !it.Done(); it.Next()) {
-      it.GetBreakLocation().ClearDebugBreak();
+    for (base::SmartPointer<BreakLocation::Iterator> it(
+             BreakLocation::GetIterator(node->debug_info()));
+         !it->Done(); it->Next()) {
+      it->GetBreakLocation().ClearDebugBreak();
     }
   }
   // Remove all debug info.
@@ -727,8 +902,10 @@
 
   // Flood the function with break points.
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
-  for (BreakLocation::Iterator it(debug_info, type); !it.Done(); it.Next()) {
-    it.GetBreakLocation().SetOneShot();
+  for (base::SmartPointer<BreakLocation::Iterator> it(
+           BreakLocation::GetIterator(debug_info, type));
+       !it->Done(); it->Next()) {
+    it->GetBreakLocation().SetOneShot();
   }
 }
 
@@ -751,13 +928,6 @@
 }
 
 
-FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
-  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-  frame->Summarize(&frames);
-  return frames.first();
-}
-
-
 void Debug::PrepareStepIn(Handle<JSFunction> function) {
   if (!is_active()) return;
   if (last_step_action() < StepIn) return;
@@ -779,8 +949,7 @@
   JavaScriptFrameIterator it(isolate_);
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
-    int stack_slots = 0;  // The computed stack slot count is not used.
-    if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) break;
+    if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
     it.Advance();
   }
 
@@ -843,18 +1012,21 @@
 
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   // Refresh frame summary if the code has been recompiled for debugging.
-  if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
+  if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
+    summary = GetFirstFrameSummary(frame);
+  }
 
-  // PC points to the instruction after the current one, possibly a break
-  // location as well. So the "- 1" to exclude it from the search.
-  Address call_pc = summary.pc() - 1;
-  BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+  int call_offset =
+      CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
+  BreakLocation location =
+      BreakLocation::FromCodeOffset(debug_info, call_offset);
 
   // At a return statement we will step out either way.
   if (location.IsReturn()) step_action = StepOut;
 
   thread_local_.last_statement_position_ =
-      debug_info->code()->SourceStatementPosition(summary.pc());
+      debug_info->abstract_code()->SourceStatementPosition(
+          summary.code_offset());
   thread_local_.last_fp_ = frame->UnpaddedFP();
 
   switch (step_action) {
@@ -961,9 +1133,10 @@
   // removed from the list.
   for (DebugInfoListNode* node = debug_info_list_; node != NULL;
        node = node->next()) {
-    for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
-         !it.Done(); it.Next()) {
-      it.GetBreakLocation().ClearOneShot();
+    for (base::SmartPointer<BreakLocation::Iterator> it(
+             BreakLocation::GetIterator(node->debug_info()));
+         !it->Done(); it->Next()) {
+      it->GetBreakLocation().ClearOneShot();
     }
   }
 }
@@ -1070,6 +1243,15 @@
       if (frame->is_optimized()) continue;
       if (!function->Inlines(shared_)) continue;
 
+      if (frame->is_interpreted()) {
+        InterpretedFrame* interpreted_frame =
+            reinterpret_cast<InterpretedFrame*>(frame);
+        BytecodeArray* debug_copy =
+            shared_->GetDebugInfo()->abstract_code()->GetBytecodeArray();
+        interpreted_frame->PatchBytecodeArray(debug_copy);
+        continue;
+      }
+
       Code* frame_code = frame->LookupCode();
       DCHECK(frame_code->kind() == Code::FUNCTION);
       if (frame_code->has_debug_break_slots()) continue;
@@ -1127,11 +1309,15 @@
   // Make sure we abort incremental marking.
   isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                       "prepare for break points");
+  bool is_interpreted = shared->HasBytecodeArray();
 
   {
+    // TODO(yangguo): with bytecode, we still walk the heap to find all
+    // optimized code for the function to deoptimize. We can probably be
+    // smarter here and avoid the heap walk.
     HeapIterator iterator(isolate_->heap());
     HeapObject* obj;
-    bool include_generators = shared->is_generator();
+    bool include_generators = !is_interpreted && shared->is_generator();
 
     while ((obj = iterator.next())) {
       if (obj->IsJSFunction()) {
@@ -1140,6 +1326,7 @@
         if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
           Deoptimizer::DeoptimizeFunction(function);
         }
+        if (is_interpreted) continue;
         if (function->shared() == *shared) functions.Add(handle(function));
       } else if (include_generators && obj->IsJSGeneratorObject()) {
         JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
@@ -1155,7 +1342,12 @@
     }
   }
 
-  if (!shared->HasDebugCode()) {
+  // We do not need to replace code to debug bytecode.
+  DCHECK(!is_interpreted || functions.length() == 0);
+  DCHECK(!is_interpreted || suspended_generators.length() == 0);
+
+  // We do not need to recompile to debug bytecode.
+  if (!is_interpreted && !shared->HasDebugCode()) {
     DCHECK(functions.length() > 0);
     if (!Compiler::CompileDebugCode(functions.first())) return false;
   }
@@ -1326,10 +1518,16 @@
     return false;
   }
 
-  if (!PrepareFunctionForBreakPoints(shared)) return false;
-
-  CreateDebugInfo(shared);
-
+  if (shared->HasBytecodeArray()) {
+    // To prepare bytecode for debugging, we already need to have the debug
+    // info (containing the debug copy) upfront, but since we do not recompile,
+    // preparing for break points cannot fail.
+    CreateDebugInfo(shared);
+    CHECK(PrepareFunctionForBreakPoints(shared));
+  } else {
+    if (!PrepareFunctionForBreakPoints(shared)) return false;
+    CreateDebugInfo(shared);
+  }
   return true;
 }
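
A sketch of why the two branches order the calls differently, with stand-in functions (assumption: preparing bytecode for break points reads the debug copy out of the debug info, so the info must exist first):

    #include <cassert>

    void CreateDebugInfo() {}                      // stand-in
    bool PrepareForBreakPoints() { return true; }  // stand-in; can fail for code

    bool EnsureDebugInfo(bool has_bytecode) {
      if (has_bytecode) {
        // Bytecode path: the debug copy lives inside the debug info, so it
        // must exist before preparation; nothing is recompiled, so this
        // cannot fail.
        CreateDebugInfo();
        bool ok = PrepareForBreakPoints();
        assert(ok);
      } else {
        // Compiled-code path: recompiling for debug support can fail, so
        // only create the debug info once preparation has succeeded.
        if (!PrepareForBreakPoints()) return false;
        CreateDebugInfo();
      }
      return true;
    }

    int main() { return EnsureDebugInfo(true) && EnsureDebugInfo(false) ? 0 : 1; }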
 
@@ -1363,7 +1561,7 @@
         prev->set_next(current->next());
       }
       delete current;
-      shared->set_debug_info(isolate_->heap()->undefined_value());
+      shared->set_debug_info(DebugInfo::uninitialized());
       return;
     }
     // Move to next in list.
@@ -1374,14 +1572,25 @@
   UNREACHABLE();
 }
 
-
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
-  after_break_target_ = NULL;
-
-  if (LiveEdit::SetAfterBreakTarget(this)) return;  // LiveEdit did the job.
-
-  // Continue just after the slot.
-  after_break_target_ = frame->pc();
+Object* Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+  if (frame->is_interpreted()) {
+    // Find the handler from the original bytecode array.
+    InterpretedFrame* interpreted_frame =
+        reinterpret_cast<InterpretedFrame*>(frame);
+    SharedFunctionInfo* shared = interpreted_frame->function()->shared();
+    BytecodeArray* bytecode_array = shared->bytecode_array();
+    int bytecode_offset = interpreted_frame->GetBytecodeOffset();
+    interpreter::Bytecode bytecode =
+        interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+    return isolate_->interpreter()->GetBytecodeHandler(bytecode);
+  } else {
+    after_break_target_ = NULL;
+    if (!LiveEdit::SetAfterBreakTarget(this)) {
+      // Continue just after the slot.
+      after_break_target_ = frame->pc();
+    }
+    return isolate_->heap()->undefined_value();
+  }
 }
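
The interpreted branch resumes execution by handing back the dispatch target for the current bytecode. A minimal sketch of that mapping, using a plain function-pointer table in place of V8's handler code objects (names and encodings are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum Bytecode : uint8_t { kLdaZero = 0, kReturn = 1, kBytecodeCount = 2 };

    typedef void (*Handler)();
    void HandleLdaZero() { std::puts("lda_zero handler"); }
    void HandleReturn() { std::puts("return handler"); }
    Handler dispatch_table[kBytecodeCount] = {HandleLdaZero, HandleReturn};

    // Read the byte at the frame's bytecode offset; map it to its handler.
    Handler AfterBreakTarget(const uint8_t* bytecode_array, int offset) {
      return dispatch_table[bytecode_array[offset]];
    }

    int main() {
      uint8_t bytecodes[] = {kLdaZero, kReturn};
      AfterBreakTarget(bytecodes, 1)();  // resumes in the Return handler
    }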
 
 
@@ -1394,21 +1603,14 @@
 
   // With no debug info there are no break points, so we can't be at a return.
   if (!shared->HasDebugInfo()) return false;
-  Handle<DebugInfo> debug_info(shared->GetDebugInfo());
-  Handle<Code> code(debug_info->code());
-#ifdef DEBUG
-  // Get the code which is actually executing.
-  Handle<Code> frame_code(frame->LookupCode());
-  DCHECK(frame_code.is_identical_to(code));
-#endif
 
-  // Find the reloc info matching the start of the debug break slot.
-  Address slot_pc = frame->pc() - Assembler::kDebugBreakSlotLength;
-  int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
-  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
-    if (it.rinfo()->pc() == slot_pc) return true;
-  }
-  return false;
+  DCHECK(!frame->is_optimized());
+  FrameSummary summary = GetFirstFrameSummary(frame);
+
+  Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+  BreakLocation location =
+      BreakLocation::FromCodeOffset(debug_info, summary.code_offset());
+  return location.IsReturn();
 }
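
A sketch of the new shape of this check: classify the location at the frame's code offset and test its type, instead of scanning relocation entries near the pc. The exact-match map below is a simplification; the real FromCodeOffset picks the closest location at or before the offset.

    #include <map>

    enum DebugBreakType { NOT_DEBUG_BREAK, DEBUG_BREAK_SLOT_AT_RETURN };

    // Hypothetical offset -> type table standing in for location iteration.
    static const std::map<int, DebugBreakType> kLocations = {
        {4, NOT_DEBUG_BREAK}, {10, DEBUG_BREAK_SLOT_AT_RETURN}};

    bool IsBreakAtReturn(int code_offset) {
      std::map<int, DebugBreakType>::const_iterator it =
          kLocations.find(code_offset);
      return it != kLocations.end() && it->second == DEBUG_BREAK_SLOT_AT_RETURN;
    }

    int main() { return IsBreakAtReturn(10) ? 0 : 1; }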
 
 
@@ -1466,16 +1668,18 @@
 
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   // Refresh frame summary if the code has been recompiled for debugging.
-  if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
+  if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
+    summary = GetFirstFrameSummary(frame);
+  }
 
-  // Find range of break points starting from the break point where execution
-  // has stopped.
-  Address call_pc = summary.pc() - 1;
+  int call_offset =
+      CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
   List<BreakLocation> locations;
-  BreakLocation::FromAddressSameStatement(debug_info, call_pc, &locations);
+  BreakLocation::FromCodeOffsetSameStatement(debug_info, call_offset,
+                                             &locations);
 
   for (BreakLocation location : locations) {
-    if (location.pc() <= summary.pc()) {
+    if (location.code_offset() <= summary.code_offset()) {
       // The break point is near our pc. Could be a step-in possibility,
       // that is currently taken by active debugger call.
       if (break_frame_id() == StackFrame::NO_ID) {
@@ -1619,6 +1823,12 @@
     if (!break_on_exception_) return;
   }
 
+  {
+    // Check whether the break location is muted.
+    JavaScriptFrameIterator it(isolate_);
+    if (!it.done() && IsMutedAtCurrentLocation(it.frame())) return;
+  }
+
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return;
 
@@ -1636,8 +1846,7 @@
 }
 
 
-void Debug::OnDebugBreak(Handle<Object> break_points_hit,
-                            bool auto_continue) {
+void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
   // The caller provided for DebugScope.
   AssertDebugContext();
   // Bail out if there is no listener for this event
@@ -2071,6 +2280,8 @@
           JSFunction::cast(fun)->context()->global_object();
       // Don't stop in debugger functions.
       if (IsDebugGlobal(global)) return;
+      // Don't stop if the break location is muted.
+      if (IsMutedAtCurrentLocation(it.frame())) return;
     }
   }
 
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 7dcc2b5..81db9e5 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -16,6 +16,7 @@
 #include "src/flags.h"
 #include "src/frames.h"
 #include "src/hashmap.h"
+#include "src/interpreter/source-position-table.h"
 #include "src/runtime/runtime.h"
 #include "src/string-stream.h"
 #include "src/v8threads.h"
@@ -64,24 +65,32 @@
  public:
   // Find the break point at the supplied address, or the closest one before
   // the address.
-  static BreakLocation FromAddress(Handle<DebugInfo> debug_info, Address pc);
+  static BreakLocation FromCodeOffset(Handle<DebugInfo> debug_info, int offset);
 
-  static void FromAddressSameStatement(Handle<DebugInfo> debug_info, Address pc,
-                                       List<BreakLocation>* result_out);
+  static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
+                                 JavaScriptFrame* frame);
+
+  static void FromCodeOffsetSameStatement(Handle<DebugInfo> debug_info,
+                                          int offset,
+                                          List<BreakLocation>* result_out);
+
+  static void AllForStatementPosition(Handle<DebugInfo> debug_info,
+                                      int statement_position,
+                                      List<BreakLocation>* result_out);
 
   static BreakLocation FromPosition(Handle<DebugInfo> debug_info, int position,
                                     BreakPositionAlignment alignment);
 
   bool IsDebugBreak() const;
 
-  inline bool IsReturn() const {
-    return RelocInfo::IsDebugBreakSlotAtReturn(rmode_);
-  }
-  inline bool IsCall() const {
-    return RelocInfo::IsDebugBreakSlotAtCall(rmode_);
+  inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
+  inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
+  inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
+  inline bool IsDebuggerStatement() const {
+    return type_ == DEBUGGER_STATEMENT;
   }
   inline bool HasBreakPoint() const {
-    return debug_info_->HasBreakPoint(pc_offset_);
+    return debug_info_->HasBreakPoint(code_offset_);
   }
 
   Handle<Object> BreakPointObjects() const;
@@ -92,79 +101,118 @@
   void SetOneShot();
   void ClearOneShot();
 
-
-  inline RelocInfo rinfo() const {
-    return RelocInfo(debug_info_->GetIsolate(), pc(), rmode(), data_, code());
-  }
-
   inline int position() const { return position_; }
   inline int statement_position() const { return statement_position_; }
 
-  inline Address pc() const { return code()->entry() + pc_offset_; }
+  inline int code_offset() const { return code_offset_; }
+  inline Isolate* isolate() { return debug_info_->GetIsolate(); }
 
-  inline RelocInfo::Mode rmode() const { return rmode_; }
+  inline AbstractCode* abstract_code() const {
+    return debug_info_->abstract_code();
+  }
 
-  inline Code* code() const { return debug_info_->code(); }
+ protected:
+  enum DebugBreakType {
+    NOT_DEBUG_BREAK,
+    DEBUGGER_STATEMENT,
+    DEBUG_BREAK_SLOT,
+    DEBUG_BREAK_SLOT_AT_CALL,
+    DEBUG_BREAK_SLOT_AT_RETURN
+  };
 
- private:
-  BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo, int position,
-                int statement_position);
+  BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
+                int code_offset, int position, int statement_position);
 
   class Iterator {
    public:
-    Iterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+    virtual ~Iterator() {}
 
-    BreakLocation GetBreakLocation() {
-      return BreakLocation(debug_info_, rinfo(), position(),
-                           statement_position());
-    }
-
-    inline bool Done() const { return reloc_iterator_.done(); }
-    void Next();
+    virtual BreakLocation GetBreakLocation() = 0;
+    virtual bool Done() const = 0;
+    virtual void Next() = 0;
 
     void SkipTo(int count) {
       while (count-- > 0) Next();
     }
 
-    inline RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
-    inline RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
-    inline Address pc() { return rinfo()->pc(); }
+    virtual int code_offset() = 0;
     int break_index() const { return break_index_; }
     inline int position() const { return position_; }
     inline int statement_position() const { return statement_position_; }
 
-   private:
-    static int GetModeMask(BreakLocatorType type);
+   protected:
+    explicit Iterator(Handle<DebugInfo> debug_info);
 
     Handle<DebugInfo> debug_info_;
-    RelocIterator reloc_iterator_;
     int break_index_;
     int position_;
     int statement_position_;
 
+   private:
     DisallowHeapAllocation no_gc_;
-
     DISALLOW_COPY_AND_ASSIGN(Iterator);
   };
 
+  class CodeIterator : public Iterator {
+   public:
+    CodeIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+    ~CodeIterator() override {}
+
+    BreakLocation GetBreakLocation() override;
+    bool Done() const override { return reloc_iterator_.done(); }
+    void Next() override;
+
+    int code_offset() override {
+      return static_cast<int>(
+          rinfo()->pc() -
+          debug_info_->abstract_code()->GetCode()->instruction_start());
+    }
+
+   private:
+    static int GetModeMask(BreakLocatorType type);
+    RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
+    RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
+
+    RelocIterator reloc_iterator_;
+    DISALLOW_COPY_AND_ASSIGN(CodeIterator);
+  };
+
+  class BytecodeArrayIterator : public Iterator {
+   public:
+    BytecodeArrayIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+    ~BytecodeArrayIterator() override {}
+
+    BreakLocation GetBreakLocation() override;
+    bool Done() const override { return source_position_iterator_.done(); }
+    void Next() override;
+
+    int code_offset() override {
+      return source_position_iterator_.bytecode_offset();
+    }
+
+   private:
+    DebugBreakType GetDebugBreakType();
+
+    interpreter::SourcePositionTableIterator source_position_iterator_;
+    BreakLocatorType break_locator_type_;
+    int start_position_;
+    DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
+  };
+
+  static Iterator* GetIterator(Handle<DebugInfo> debug_info,
+                               BreakLocatorType type = ALL_BREAK_LOCATIONS);
+
+ private:
   friend class Debug;
 
-  static int BreakIndexFromAddress(Handle<DebugInfo> debug_info, Address pc);
+  static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info, int offset);
 
   void SetDebugBreak();
   void ClearDebugBreak();
 
-  inline bool IsDebuggerStatement() const {
-    return RelocInfo::IsDebuggerStatement(rmode_);
-  }
-  inline bool IsDebugBreakSlot() const {
-    return RelocInfo::IsDebugBreakSlot(rmode_);
-  }
-
   Handle<DebugInfo> debug_info_;
-  int pc_offset_;
-  RelocInfo::Mode rmode_;
-  intptr_t data_;
+  int code_offset_;
+  DebugBreakType type_;
   int position_;
   int statement_position_;
 };
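
The DebugBreakType enum above is deliberately ordered so that slot-ness reduces to a comparison: every *_SLOT_AT_* value sorts after DEBUG_BREAK_SLOT. A compilable sketch of that invariant:

    #include <cassert>

    enum DebugBreakType {
      NOT_DEBUG_BREAK,
      DEBUGGER_STATEMENT,
      DEBUG_BREAK_SLOT,
      DEBUG_BREAK_SLOT_AT_CALL,
      DEBUG_BREAK_SLOT_AT_RETURN
    };

    bool IsDebugBreakSlot(DebugBreakType t) { return t >= DEBUG_BREAK_SLOT; }

    int main() {
      assert(IsDebugBreakSlot(DEBUG_BREAK_SLOT_AT_CALL));    // slot variant
      assert(IsDebugBreakSlot(DEBUG_BREAK_SLOT_AT_RETURN));  // slot variant
      assert(!IsDebugBreakSlot(DEBUGGER_STATEMENT));         // not a slot
    }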
@@ -383,7 +431,7 @@
   // Internal logic
   bool Load();
   void Break(Arguments args, JavaScriptFrame*);
-  void SetAfterBreakTarget(JavaScriptFrame* frame);
+  Object* SetAfterBreakTarget(JavaScriptFrame* frame);
 
   // Scripts handling.
   Handle<FixedArray> GetLoadedScripts();
@@ -555,7 +603,9 @@
   void ClearOneShot();
   void ActivateStepOut(StackFrame* frame);
   void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
-  Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+  Handle<Object> CheckBreakPoints(BreakLocation* location,
+                                  bool* has_break_points = nullptr);
+  bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
   bool CheckBreakPoint(Handle<Object> break_point_object);
   MaybeHandle<Object> CallFunction(const char* name, int argc,
                                    Handle<Object> args[]);
@@ -739,6 +789,7 @@
 
   static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                   Handle<Code> code);
+  static bool DebugBreakSlotIsPatched(Address pc);
   static void ClearDebugBreakSlot(Isolate* isolate, Address pc);
 };
 
diff --git a/src/debug/debug.js b/src/debug/debug.js
index bc2c696..6849bf5 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -147,10 +147,8 @@
   } else {
     this.number_ = next_break_point_number++;
   }
-  this.hit_count_ = 0;
   this.active_ = true;
   this.condition_ = null;
-  this.ignoreCount_ = 0;
 }
 
 
@@ -169,11 +167,6 @@
 };
 
 
-BreakPoint.prototype.hit_count = function() {
-  return this.hit_count_;
-};
-
-
 BreakPoint.prototype.active = function() {
   if (this.script_break_point()) {
     return this.script_break_point().active();
@@ -190,11 +183,6 @@
 };
 
 
-BreakPoint.prototype.ignoreCount = function() {
-  return this.ignoreCount_;
-};
-
-
 BreakPoint.prototype.script_break_point = function() {
   return this.script_break_point_;
 };
@@ -215,11 +203,6 @@
 };
 
 
-BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
-  this.ignoreCount_ = ignoreCount;
-};
-
-
 BreakPoint.prototype.isTriggered = function(exec_state) {
   // Break point not active - not triggered.
   if (!this.active()) return false;
@@ -239,18 +222,6 @@
     }
   }
 
-  // Update the hit count.
-  this.hit_count_++;
-  if (this.script_break_point_) {
-    this.script_break_point_.hit_count_++;
-  }
-
-  // If the break point has an ignore count it is not triggered.
-  if (this.ignoreCount_ > 0) {
-    this.ignoreCount_--;
-    return false;
-  }
-
   // Break point triggered.
   return true;
 };
@@ -283,10 +254,8 @@
   this.groupId_ = opt_groupId;
   this.position_alignment_ = IS_UNDEFINED(opt_position_alignment)
       ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
-  this.hit_count_ = 0;
   this.active_ = true;
   this.condition_ = null;
-  this.ignoreCount_ = 0;
   this.break_points_ = [];
 }
 
@@ -299,10 +268,8 @@
   copy.number_ = next_break_point_number++;
   script_break_points.push(copy);
 
-  copy.hit_count_ = this.hit_count_;
   copy.active_ = this.active_;
   copy.condition_ = this.condition_;
-  copy.ignoreCount_ = this.ignoreCount_;
   return copy;
 };
 
@@ -362,11 +329,6 @@
 };
 
 
-ScriptBreakPoint.prototype.hit_count = function() {
-  return this.hit_count_;
-};
-
-
 ScriptBreakPoint.prototype.active = function() {
   return this.active_;
 };
@@ -377,11 +339,6 @@
 };
 
 
-ScriptBreakPoint.prototype.ignoreCount = function() {
-  return this.ignoreCount_;
-};
-
-
 ScriptBreakPoint.prototype.enable = function() {
   this.active_ = true;
 };
@@ -397,16 +354,6 @@
 };
 
 
-ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
-  this.ignoreCount_ = ignoreCount;
-
-  // Set ignore count on all break points created from this script break point.
-  for (var i = 0; i < this.break_points_.length; i++) {
-    this.break_points_[i].setIgnoreCount(ignoreCount);
-  }
-};
-
-
 // Check whether a script matches this script break point. Currently this is
 // only based on script name.
 ScriptBreakPoint.prototype.matchesScript = function(script) {
@@ -461,7 +408,6 @@
 
   // Create a break point object and set the break point.
   var break_point = MakeBreakPoint(position, this);
-  break_point.setIgnoreCount(this.ignoreCount());
   var actual_position = %SetScriptBreakPoint(script, position,
                                              this.position_alignment_,
                                              break_point);
@@ -726,13 +672,6 @@
 };
 
 
-Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
-  if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
-  var break_point = this.findBreakPoint(break_point_number, false);
-  break_point.setIgnoreCount(ignoreCount);
-};
-
-
 Debug.clearBreakPoint = function(break_point_number) {
   var break_point = this.findBreakPoint(break_point_number, true);
   if (break_point) {
@@ -857,14 +796,6 @@
 };
 
 
-Debug.changeScriptBreakPointIgnoreCount = function(
-    break_point_number, ignoreCount) {
-  if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
-  var script_break_point = this.findScriptBreakPoint(break_point_number, false);
-  script_break_point.setIgnoreCount(ignoreCount);
-};
-
-
 Debug.scriptBreakPoints = function() {
   return script_break_points;
 };
@@ -1503,7 +1434,6 @@
   var enabled = IS_UNDEFINED(request.arguments.enabled) ?
       true : request.arguments.enabled;
   var condition = request.arguments.condition;
-  var ignoreCount = request.arguments.ignoreCount;
   var groupId = request.arguments.groupId;
 
   // Check for legal arguments.
@@ -1569,9 +1499,6 @@
 
   // Set additional break point properties.
   var break_point = Debug.findBreakPoint(break_point_number);
-  if (ignoreCount) {
-    Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
-  }
   if (!enabled) {
     Debug.disableBreakPoint(break_point_number);
   }
@@ -1617,7 +1544,6 @@
   var break_point = TO_NUMBER(request.arguments.breakpoint);
   var enabled = request.arguments.enabled;
   var condition = request.arguments.condition;
-  var ignoreCount = request.arguments.ignoreCount;
 
   // Check for legal arguments.
   if (!break_point) {
@@ -1638,11 +1564,6 @@
   if (!IS_UNDEFINED(condition)) {
     Debug.changeBreakPointCondition(break_point, condition);
   }
-
-  // Change ignore count if supplied
-  if (!IS_UNDEFINED(ignoreCount)) {
-    Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
-  }
 };
 
 
@@ -1717,10 +1638,8 @@
       line: break_point.line(),
       column: break_point.column(),
       groupId: break_point.groupId(),
-      hit_count: break_point.hit_count(),
       active: break_point.active(),
       condition: break_point.condition(),
-      ignoreCount: break_point.ignoreCount(),
       actual_locations: break_point.actual_locations()
     };
 
@@ -2396,7 +2315,7 @@
     frame_mirror = this.exec_state_.frame();
   }
 
-  var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
+  var result_description = frame_mirror.restart();
   response.body = {result: result_description};
 };
 
diff --git a/src/debug/ia32/debug-ia32.cc b/src/debug/ia32/debug-ia32.cc
index d489a01..95f2bc6 100644
--- a/src/debug/ia32/debug-ia32.cc
+++ b/src/debug/ia32/debug-ia32.cc
@@ -50,6 +50,9 @@
   DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  return !Assembler::IsNop(pc);
+}
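
The per-architecture implementations added in this and the following files share one idea: an unpatched slot still holds the nop filler emitted for it, so "patched" means "no longer a nop". A sketch with illustrative byte values, not the real encodings:

    #include <cstdint>

    const uint8_t kNopByte = 0x90;  // illustrative one-byte nop encoding

    bool DebugBreakSlotIsPatched(const uint8_t* pc) { return *pc != kNopByte; }

    int main() {
      uint8_t unpatched = kNopByte;
      uint8_t patched = 0xE8;  // 0xE8: a call opcode, as an example
      return (!DebugBreakSlotIsPatched(&unpatched) &&
              DebugBreakSlotIsPatched(&patched)) ? 0 : 1;
    }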
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index f1f3f23..91c990d 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -1857,8 +1857,8 @@
     HandleScope scope(isolate);
 
     for (int i = 0; i < len; i++) {
-      Handle<JSValue> jsvalue =
-          Handle<JSValue>::cast(FixedArray::get(shared_info_array, i));
+      Handle<JSValue> jsvalue = Handle<JSValue>::cast(
+          FixedArray::get(*shared_info_array, i, isolate));
       Handle<SharedFunctionInfo> shared =
           UnwrapSharedFunctionInfoFromJSValue(jsvalue);
 
diff --git a/src/debug/mips/debug-mips.cc b/src/debug/mips/debug-mips.cc
index c5c58d0..1d9f7d6 100644
--- a/src/debug/mips/debug-mips.cc
+++ b/src/debug/mips/debug-mips.cc
@@ -56,6 +56,10 @@
   patcher.masm()->Call(v8::internal::t9);
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instr current_instr = Assembler::instr_at(pc);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/mips64/debug-mips64.cc b/src/debug/mips64/debug-mips64.cc
index 1d65fd9..0646a24 100644
--- a/src/debug/mips64/debug-mips64.cc
+++ b/src/debug/mips64/debug-mips64.cc
@@ -58,6 +58,10 @@
   patcher.masm()->Call(v8::internal::t9);
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instr current_instr = Assembler::instr_at(pc);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index 1fd5fa9..8b9dd02 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -812,7 +812,7 @@
     // Skip properties which are defined through accessors.
     var property = properties[i];
     if (property.propertyType() != PropertyType.AccessorConstant) {
-      if (%_ObjectEquals(property.value_, value.value_)) {
+      if (property.value_ === value.value_) {
         return property;
       }
     }
diff --git a/src/debug/ppc/debug-ppc.cc b/src/debug/ppc/debug-ppc.cc
index c5ddab8..aab5399 100644
--- a/src/debug/ppc/debug-ppc.cc
+++ b/src/debug/ppc/debug-ppc.cc
@@ -64,6 +64,10 @@
   patcher.masm()->bctrl();
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  Instr current_instr = Assembler::instr_at(pc);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/x64/debug-x64.cc b/src/debug/x64/debug-x64.cc
index 0d56ea7..f7fbe76 100644
--- a/src/debug/x64/debug-x64.cc
+++ b/src/debug/x64/debug-x64.cc
@@ -51,6 +51,9 @@
   DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  return !Assembler::IsNop(pc);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/debug/x87/debug-x87.cc b/src/debug/x87/debug-x87.cc
index 8c04e02..8ddb82f 100644
--- a/src/debug/x87/debug-x87.cc
+++ b/src/debug/x87/debug-x87.cc
@@ -50,6 +50,9 @@
   DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+  return !Assembler::IsNop(pc);
+}
 
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 4bdafbf..e00e5ab 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -11,8 +11,10 @@
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/global-handles.h"
+#include "src/interpreter/interpreter.h"
 #include "src/macro-assembler.h"
 #include "src/profiler/cpu-profiler.h"
+#include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
 
@@ -35,7 +37,6 @@
 
 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
     : allocator_(allocator),
-      deoptimized_frame_info_(NULL),
       current_(NULL) {
   for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
     deopt_entry_code_entries_[i] = -1;
@@ -52,13 +53,6 @@
 }
 
 
-void DeoptimizerData::Iterate(ObjectVisitor* v) {
-  if (deoptimized_frame_info_ != NULL) {
-    deoptimized_frame_info_->Iterate(v);
-  }
-}
-
-
 Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
   if (function_->IsHeapObject()) {
     // Search all deoptimizing code in the native context of the function.
@@ -140,73 +134,27 @@
     int jsframe_index,
     Isolate* isolate) {
   CHECK(frame->is_optimized());
-  CHECK(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
 
-  // Get the function and code from the frame.
-  JSFunction* function = frame->function();
-  Code* code = frame->LookupCode();
+  TranslatedState translated_values(frame);
+  translated_values.Prepare(false, frame->fp());
 
-  // Locate the deoptimization point in the code. As we are at a call the
-  // return address must be at a place in the code with deoptimization support.
-  SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
-  int deoptimization_index = safepoint_entry.deoptimization_index();
-  CHECK_NE(deoptimization_index, Safepoint::kNoDeoptimizationIndex);
+  TranslatedState::iterator frame_it = translated_values.end();
+  int counter = jsframe_index;
+  for (auto it = translated_values.begin(); it != translated_values.end();
+       it++) {
+    if (it->kind() == TranslatedFrame::kFunction ||
+        it->kind() == TranslatedFrame::kInterpretedFunction) {
+      if (counter == 0) {
+        frame_it = it;
+        break;
+      }
+      counter--;
+    }
+  }
+  CHECK(frame_it != translated_values.end());
 
-  // Always use the actual stack slots when calculating the fp to sp
-  // delta adding two for the function and context.
-  unsigned stack_slots = code->stack_slots();
-  unsigned arguments_stack_height =
-      Deoptimizer::ComputeOutgoingArgumentSize(code, deoptimization_index);
-  unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
-                            StandardFrameConstants::kFixedFrameSizeFromFp +
-                            arguments_stack_height;
-
-  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
-                                             function,
-                                             Deoptimizer::DEBUGGER,
-                                             deoptimization_index,
-                                             frame->pc(),
-                                             fp_to_sp_delta,
-                                             code);
-  Address tos = frame->fp() - fp_to_sp_delta;
-  deoptimizer->FillInputFrame(tos, frame);
-
-  // Calculate the output frames.
-  Deoptimizer::ComputeOutputFrames(deoptimizer);
-
-  // Create the GC safe output frame information and register it for GC
-  // handling.
-  CHECK_LT(jsframe_index, deoptimizer->jsframe_count());
-
-  // Convert JS frame index into frame index.
-  int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
-
-  bool has_arguments_adaptor =
-      frame_index > 0 &&
-      deoptimizer->output_[frame_index - 1]->GetFrameType() ==
-      StackFrame::ARGUMENTS_ADAPTOR;
-
-  int construct_offset = has_arguments_adaptor ? 2 : 1;
-  bool has_construct_stub =
-      frame_index >= construct_offset &&
-      deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
-      StackFrame::CONSTRUCT;
-
-  DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
-                                                        frame_index,
-                                                        has_arguments_adaptor,
-                                                        has_construct_stub);
-  isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
-
-  // Done with the GC-unsafe frame descriptions. This re-enables allocation.
-  deoptimizer->DeleteFrameDescriptions();
-
-  // Allocate a heap number for the doubles belonging to this frame.
-  deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
-      frame_index, info->parameters_count(), info->expression_count(), info);
-
-  // Finished using the deoptimizer instance.
-  delete deoptimizer;
+  DeoptimizedFrameInfo* info =
+      new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
 
   return info;
 }
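
A sketch of the jsframe_index mapping used above: translated frames interleave JS frames with adaptor and stub frames, so the debugger's JS index is resolved by counting only JS-like frames (illustrative kinds):

    #include <cstddef>
    #include <vector>

    enum FrameKind { kFunction, kInterpretedFunction, kArgumentsAdaptor };

    // Count only JS-like frames until the requested debugger index is hit.
    int ToTranslatedIndex(const std::vector<FrameKind>& frames,
                          int jsframe_index) {
      int counter = jsframe_index;
      for (std::size_t i = 0; i < frames.size(); i++) {
        if (frames[i] == kFunction || frames[i] == kInterpretedFunction) {
          if (counter == 0) return static_cast<int>(i);
          counter--;
        }
      }
      return -1;  // the patch CHECKs that this cannot happen
    }

    int main() {
      std::vector<FrameKind> frames = {kFunction, kArgumentsAdaptor, kFunction};
      return ToTranslatedIndex(frames, 1) == 2 ? 0 : 1;
    }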
@@ -214,9 +162,7 @@
 
 void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                                  Isolate* isolate) {
-  CHECK_EQ(isolate->deoptimizer_data()->deoptimized_frame_info_, info);
   delete info;
-  isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
 }
 
 
@@ -394,8 +340,8 @@
     element = next;
   }
 
-  // TODO(titzer): we need a handle scope only because of the macro assembler,
-  // which is only used in EnsureCodeForDeoptimizationEntry.
+  // We need a handle scope only because of the macro assembler,
+  // which is used in code patching in EnsureCodeForDeoptimizationEntry.
   HandleScope scope(isolate);
 
   // Now patch all the codes for deoptimization.
@@ -426,6 +372,8 @@
 
 
 void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
@@ -443,6 +391,8 @@
 
 
 void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   if (FLAG_trace_deopt) {
     CodeTracer::Scope scope(isolate->GetCodeTracer());
     PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
@@ -470,6 +420,8 @@
 
 
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  TimerEventScope<TimerEventDeoptimizeCode> timer(function->GetIsolate());
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
   Code* code = function->code();
   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
     // Mark the code for deoptimization and unlink any functions that also
@@ -513,7 +465,6 @@
   return NULL;
 }
 
-
 Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
                          BailoutType type, unsigned bailout_id, Address from,
                          int fp_to_sp_delta, Code* optimized_code)
@@ -524,11 +475,19 @@
       from_(from),
       fp_to_sp_delta_(fp_to_sp_delta),
       has_alignment_padding_(0),
+      deoptimizing_throw_(false),
+      catch_handler_data_(-1),
+      catch_handler_pc_offset_(-1),
       input_(nullptr),
       output_count_(0),
       jsframe_count_(0),
       output_(nullptr),
       trace_scope_(nullptr) {
+  if (isolate->deoptimizer_lazy_throw()) {
+    isolate->set_deoptimizer_lazy_throw(false);
+    deoptimizing_throw_ = true;
+  }
+
   // For COMPILED_STUBs called from builtins, the function pointer is a SMI
   // indicating an internal frame.
   if (function->IsSmi()) {
@@ -567,7 +526,11 @@
     PROFILE(isolate_, CodeDeoptEvent(compiled_code_, from_, fp_to_sp_delta_));
   }
   unsigned size = ComputeInputFrameSize();
-  input_ = new(size) FrameDescription(size, function);
+  int parameter_count =
+      function == nullptr
+          ? 0
+          : (function->shared()->internal_formal_parameter_count() + 1);
+  input_ = new (size) FrameDescription(size, parameter_count);
   input_->SetFrameType(frame_type);
 }
 
@@ -702,6 +665,41 @@
   return length;
 }
 
+namespace {
+
+int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
+  switch (translated_frame->kind()) {
+    case TranslatedFrame::kFunction: {
+      BailoutId node_id = translated_frame->node_id();
+      JSFunction* function =
+          JSFunction::cast(translated_frame->begin()->GetRawValue());
+      Code* non_optimized_code = function->shared()->code();
+      FixedArray* raw_data = non_optimized_code->deoptimization_data();
+      DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+      unsigned pc_and_state =
+          Deoptimizer::GetOutputInfo(data, node_id, function->shared());
+      unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+      HandlerTable* table =
+          HandlerTable::cast(non_optimized_code->handler_table());
+      HandlerTable::CatchPrediction prediction;
+      return table->LookupRange(pc_offset, data_out, &prediction);
+    }
+    case TranslatedFrame::kInterpretedFunction: {
+      int bytecode_offset = translated_frame->node_id().ToInt();
+      JSFunction* function =
+          JSFunction::cast(translated_frame->begin()->GetRawValue());
+      BytecodeArray* bytecode = function->shared()->bytecode_array();
+      HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+      HandlerTable::CatchPrediction prediction;
+      return table->LookupRange(bytecode_offset, data_out, &prediction);
+    }
+    default:
+      break;
+  }
+  return -1;
+}
+
+}  // namespace
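
Both cases above reduce to one question: which catch range in the handler table covers this code offset? A self-contained sketch of such a range lookup (first-match scan; V8's HandlerTable::LookupRange additionally handles nesting and a catch prediction, omitted here):

    #include <vector>

    struct HandlerRange { int start, end, handler_offset, data; };

    int LookupRange(const std::vector<HandlerRange>& table, int offset,
                    int* data_out) {
      for (const HandlerRange& r : table) {
        if (offset >= r.start && offset < r.end) {
          *data_out = r.data;
          return r.handler_offset;
        }
      }
      return -1;  // no enclosing try block
    }

    int main() {
      std::vector<HandlerRange> table = {{8, 40, 48, /* stack height */ 3}};
      int data = 0;
      return LookupRange(table, 20, &data) == 48 ? 0 : 1;
    }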
 
 // We rely on this function not causing a GC.  It is called from generated code
 // without having a real stack frame in place.
@@ -742,6 +740,22 @@
 
   // Do the input frame to output frame(s) translation.
   size_t count = translated_state_.frames().size();
+  // If we are supposed to go to the catch handler, find the catching frame
+  // for the catch and make sure we only deoptimize up to that frame.
+  if (deoptimizing_throw_) {
+    size_t catch_handler_frame_index = count;
+    for (size_t i = count; i-- > 0;) {
+      catch_handler_pc_offset_ = LookupCatchHandler(
+          &(translated_state_.frames()[i]), &catch_handler_data_);
+      if (catch_handler_pc_offset_ >= 0) {
+        catch_handler_frame_index = i;
+        break;
+      }
+    }
+    CHECK_LT(catch_handler_frame_index, count);
+    count = catch_handler_frame_index + 1;
+  }
+
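A sketch of the trimming logic just added: walk the translated frames from the top down, keep the first one whose handler lookup succeeds, and emit output frames only up to and including it (stand-in HandlerFor, illustrative data):

    #include <cstddef>

    // Stand-in: frame 1 has a catch handler; all others do not.
    int HandlerFor(std::size_t frame) { return frame == 1 ? 42 : -1; }

    // Mirrors the reverse walk above, including the i-- > 0 idiom.
    std::size_t FramesToEmit(std::size_t count) {
      for (std::size_t i = count; i-- > 0;) {
        if (HandlerFor(i) >= 0) return i + 1;  // deoptimize up to this frame
      }
      return count;  // no handler; the patch CHECKs this cannot be reached
    }

    int main() { return FramesToEmit(3) == 2 ? 0 : 1; }
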
   DCHECK(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (size_t i = 0; i < count; ++i) {
@@ -760,11 +774,12 @@
     int frame_index = static_cast<int>(i);
     switch (translated_state_.frames()[i].kind()) {
       case TranslatedFrame::kFunction:
-        DoComputeJSFrame(frame_index);
+        DoComputeJSFrame(frame_index, deoptimizing_throw_ && i == count - 1);
         jsframe_count_++;
         break;
       case TranslatedFrame::kInterpretedFunction:
-        DoComputeInterpretedFrame(frame_index);
+        DoComputeInterpretedFrame(frame_index,
+                                  deoptimizing_throw_ && i == count - 1);
         jsframe_count_++;
         break;
       case TranslatedFrame::kArgumentsAdaptor:
@@ -809,40 +824,53 @@
   }
 }
 
-
-void Deoptimizer::DoComputeJSFrame(int frame_index) {
+void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
   TranslatedFrame* translated_frame =
       &(translated_state_.frames()[frame_index]);
+  SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
   int input_index = 0;
 
   BailoutId node_id = translated_frame->node_id();
   unsigned height =
       translated_frame->height() - 1;  // Do not count the context.
   unsigned height_in_bytes = height * kPointerSize;
+  if (goto_catch_handler) {
+    // Take the stack height from the handler table.
+    height = catch_handler_data_;
+    // We also make space for the exception itself.
+    height_in_bytes = (height + 1) * kPointerSize;
+    CHECK(is_topmost);
+  }
+
   JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
   value_iterator++;
   input_index++;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(), "  translating frame ");
-    function->PrintName(trace_scope_->file());
+    base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+    PrintF(trace_scope_->file(), "%s", name.get());
-    PrintF(trace_scope_->file(),
-           " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+    PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
+           height_in_bytes, goto_catch_handler ? " (throw)" : "");
   }
 
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by JavaScriptFrameConstants.
-  unsigned fixed_frame_size = ComputeJavascriptFixedSize(function);
+  unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
   unsigned input_frame_size = input_->GetFrameSize();
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
+  int parameter_count = shared->internal_formal_parameter_count() + 1;
+  FrameDescription* output_frame = new (output_frame_size)
+      FrameDescription(output_frame_size, parameter_count);
   output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
 
-  bool is_bottommost = (0 == frame_index);
-  bool is_topmost = (output_count_ - 1 == frame_index);
   CHECK(frame_index >= 0 && frame_index < output_count_);
   CHECK_NULL(output_[frame_index]);
   output_[frame_index] = output_frame;
@@ -856,9 +884,8 @@
   if (is_bottommost) {
     // Determine whether the input frame contains alignment padding.
     has_alignment_padding_ =
-        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
-            ? 1
-            : 0;
+        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(shared)) ? 1
+                                                                           : 0;
     // 2 = context and function in the frame.
     // If the optimized frame had alignment padding, adjust the frame pointer
     // to point to the new position of the old frame pointer after padding
@@ -872,8 +899,6 @@
   output_frame->SetTop(top_address);
 
   // Compute the incoming parameter translation.
-  int parameter_count =
-      function->shared()->internal_formal_parameter_count() + 1;
   unsigned output_offset = output_frame_size;
   unsigned input_offset = input_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
@@ -945,8 +970,20 @@
   Register context_reg = JavaScriptFrame::context_register();
   output_offset -= kPointerSize;
   input_offset -= kPointerSize;
+
+  TranslatedFrame::iterator context_pos = value_iterator;
+  int context_input_index = input_index;
+  // When deoptimizing into a catch block, we need to take the context
+  // from just above the top of the operand stack (we push the context
+  // at the entry of the try block).
+  if (goto_catch_handler) {
+    for (unsigned i = 0; i < height + 1; ++i) {
+      context_pos++;
+      context_input_index++;
+    }
+  }
   // Read the context from the translations.
-  Object* context = value_iterator->GetRawValue();
+  Object* context = context_pos->GetRawValue();
   if (context == isolate_->heap()->undefined_value()) {
     // If the context was optimized away, just use the context from
     // the activation. This should only apply to Crankshaft code.
@@ -959,13 +996,13 @@
   value = reinterpret_cast<intptr_t>(context);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
-  WriteValueToOutput(context, input_index, frame_index, output_offset,
+  WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                      "context    ");
   if (context == isolate_->heap()->arguments_marker()) {
     Address output_address =
         reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
         output_offset;
-    values_to_materialize_.push_back({output_address, value_iterator});
+    values_to_materialize_.push_back({output_address, context_pos});
   }
   value_iterator++;
   input_index++;
@@ -985,19 +1022,19 @@
     WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                  output_offset);
   }
+  if (goto_catch_handler) {
+    // Write out the exception for the catch handler.
+    output_offset -= kPointerSize;
+    Object* exception_obj = reinterpret_cast<Object*>(
+        input_->GetRegister(FullCodeGenerator::result_register().code()));
+    WriteValueToOutput(exception_obj, input_index, frame_index, output_offset,
+                       "exception   ");
+    input_index++;
+  }
   CHECK_EQ(0u, output_offset);
 
-  // Compute this frame's PC, state, and continuation.
-  Code* non_optimized_code = function->shared()->code();
-  FixedArray* raw_data = non_optimized_code->deoptimization_data();
-  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
-  Address start = non_optimized_code->instruction_start();
-  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
-  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
-  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
-  output_frame->SetPc(pc_value);
-
   // Update constant pool.
+  Code* non_optimized_code = shared->code();
   if (FLAG_enable_embedded_constant_pool) {
     intptr_t constant_pool_value =
         reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
@@ -1009,8 +1046,22 @@
     }
   }
 
+  // Compute this frame's PC, state, and continuation.
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = goto_catch_handler
+                           ? catch_handler_pc_offset_
+                           : FullCodeGenerator::PcField::decode(pc_and_state);
+  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  // If we are going to the catch handler, then the exception lives in
+  // the accumulator.
   FullCodeGenerator::State state =
-      FullCodeGenerator::StateField::decode(pc_and_state);
+      goto_catch_handler ? FullCodeGenerator::TOS_REG
+                         : FullCodeGenerator::StateField::decode(pc_and_state);
   output_frame->SetState(Smi::FromInt(state));
 
   // Set the continuation for the topmost frame.
@@ -1029,14 +1080,16 @@
   }
 }
 
-
-void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
+void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
+                                            bool goto_catch_handler) {
   TranslatedFrame* translated_frame =
       &(translated_state_.frames()[frame_index]);
+  SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
   int input_index = 0;
 
-  BailoutId bytecode_offset = translated_frame->node_id();
+  int bytecode_offset = translated_frame->node_id().ToInt();
   unsigned height = translated_frame->height();
   unsigned height_in_bytes = height * kPointerSize;
   JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
@@ -1044,20 +1097,26 @@
   input_index++;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(), "  translating interpreted frame ");
-    function->PrintName(trace_scope_->file());
-    PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d\n",
-           bytecode_offset.ToInt(), height_in_bytes);
+    base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+    PrintF(trace_scope_->file(), "%s", name.get());
+    PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
+           bytecode_offset, height_in_bytes,
+           goto_catch_handler ? " (throw)" : "");
+  }
+  if (goto_catch_handler) {
+    bytecode_offset = catch_handler_pc_offset_;
   }
 
   // The 'fixed' part of the frame consists of the incoming parameters and
   // the part described by InterpreterFrameConstants.
-  unsigned fixed_frame_size = ComputeInterpretedFixedSize(function);
+  unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
   unsigned input_frame_size = input_->GetFrameSize();
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new (output_frame_size) FrameDescription(output_frame_size, function);
+  int parameter_count = shared->internal_formal_parameter_count() + 1;
+  FrameDescription* output_frame = new (output_frame_size)
+      FrameDescription(output_frame_size, parameter_count);
   output_frame->SetFrameType(StackFrame::INTERPRETED);
 
   bool is_bottommost = (0 == frame_index);
@@ -1084,8 +1143,6 @@
   output_frame->SetTop(top_address);
 
   // Compute the incoming parameter translation.
-  int parameter_count =
-      function->shared()->internal_formal_parameter_count() + 1;
   unsigned output_offset = output_frame_size;
   unsigned input_offset = input_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
@@ -1159,14 +1216,27 @@
   Register context_reg = InterpretedFrame::context_register();
   output_offset -= kPointerSize;
   input_offset -= kPointerSize;
+
+  // When deoptimizing into a catch block, we need to take the context
+  // from a register that was specified in the handler table.
+  TranslatedFrame::iterator context_pos = value_iterator;
+  int context_input_index = input_index;
+  if (goto_catch_handler) {
+    // Skip to the translated value of the register specified
+    // in the handler table.
+    for (int i = 0; i < catch_handler_data_ + 1; ++i) {
+      context_pos++;
+      context_input_index++;
+    }
+  }
   // Read the context from the translations.
-  Object* context = value_iterator->GetRawValue();
+  Object* context = context_pos->GetRawValue();
   // The context should not be a placeholder for a materialized object.
   CHECK(context != isolate_->heap()->arguments_marker());
   value = reinterpret_cast<intptr_t>(context);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
-  WriteValueToOutput(context, input_index, frame_index, output_offset,
+  WriteValueToOutput(context, context_input_index, frame_index, output_offset,
                      "context    ");
   value_iterator++;
   input_index++;
@@ -1180,45 +1250,64 @@
   DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   WriteValueToOutput(function, 0, frame_index, output_offset, "function    ");
 
-  // TODO(rmcilroy): Deal with new.target correctly - currently just set it to
+  // The new.target slot is only used during function activation, which is
+  // before the first deopt point, so should never be needed. Just set it to
   // undefined.
   output_offset -= kPointerSize;
   input_offset -= kPointerSize;
   Object* new_target = isolate_->heap()->undefined_value();
   WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target  ");
 
+  // Set the bytecode array pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  Object* bytecode_array = shared->bytecode_array();
+  WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
+                     "bytecode array ");
+
   // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
   output_offset -= kPointerSize;
   input_offset -= kPointerSize;
   int raw_bytecode_offset =
-      BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset.ToInt();
+      BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
   Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
   WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
                      "bytecode offset ");
 
   // Translate the rest of the interpreter registers in the frame.
-  for (unsigned i = 0; i < height; ++i) {
+  for (unsigned i = 0; i < height - 1; ++i) {
     output_offset -= kPointerSize;
     WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
                                  output_offset);
   }
+
+  // Put the accumulator on the stack. It will be popped by the
+  // InterpreterNotifyDeopt builtin (possibly after materialization).
+  output_offset -= kPointerSize;
+  if (goto_catch_handler) {
+    // If we are lazy deopting to a catch handler, we set the accumulator to
+    // the exception (which lives in the result register).
+    intptr_t accumulator_value =
+        input_->GetRegister(FullCodeGenerator::result_register().code());
+    WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
+                       frame_index, output_offset, "accumulator ");
+    value_iterator++;
+  } else {
+    WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+                                 output_offset);
+  }
   CHECK_EQ(0u, output_offset);
 
-  // Set the accumulator register.
-  output_frame->SetRegister(
-      kInterpreterAccumulatorRegister.code(),
-      reinterpret_cast<intptr_t>(value_iterator->GetRawValue()));
-  value_iterator++;
-
   Builtins* builtins = isolate_->builtins();
-  Code* trampoline = builtins->builtin(Builtins::kInterpreterEntryTrampoline);
-  output_frame->SetPc(reinterpret_cast<intptr_t>(trampoline->entry()));
+  Code* dispatch_builtin =
+      builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
   output_frame->SetState(0);
 
   // Update constant pool.
   if (FLAG_enable_embedded_constant_pool) {
     intptr_t constant_pool_value =
-        reinterpret_cast<intptr_t>(trampoline->constant_pool());
+        reinterpret_cast<intptr_t>(dispatch_builtin->constant_pool());
     output_frame->SetConstantPool(constant_pool_value);
     if (is_topmost) {
       Register constant_pool_reg =
@@ -1266,8 +1355,9 @@
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
+  int parameter_count = height;
+  FrameDescription* output_frame = new (output_frame_size)
+      FrameDescription(output_frame_size, parameter_count);
   output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
 
   // Arguments adaptor can not be topmost or bottommost.
@@ -1282,7 +1372,6 @@
   output_frame->SetTop(top_address);
 
   // Compute the incoming parameter translation.
-  int parameter_count = height;
   unsigned output_offset = output_frame_size;
   for (int i = 0; i < parameter_count; ++i) {
     output_offset -= kPointerSize;
@@ -1362,7 +1451,7 @@
   Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   unsigned height = translated_frame->height();
   unsigned height_in_bytes = height * kPointerSize;
-  JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+  // Skip function.
   value_iterator++;
   input_index++;
   if (trace_scope_ != NULL) {
@@ -1375,7 +1464,7 @@
 
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
+      new (output_frame_size) FrameDescription(output_frame_size);
   output_frame->SetFrameType(StackFrame::CONSTRUCT);
 
   // Construct stub can not be topmost or bottommost.
@@ -1488,7 +1577,7 @@
   TranslatedFrame::iterator value_iterator = translated_frame->begin();
   int input_index = 0;
 
-  JSFunction* accessor = JSFunction::cast(value_iterator->GetRawValue());
+  // Skip accessor.
   value_iterator++;
   input_index++;
   // The receiver (and the implicit return value, if any) are expected in
@@ -1515,7 +1604,7 @@
 
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, accessor);
+      new (output_frame_size) FrameDescription(output_frame_size);
   output_frame->SetFrameType(StackFrame::INTERNAL);
 
   // A frame for an accessor stub can not be the topmost or bottommost one.
@@ -1657,7 +1746,9 @@
   // object to the stub failure handler.
   int param_count = descriptor.GetRegisterParameterCount();
   int stack_param_count = descriptor.GetStackParameterCount();
-  CHECK_EQ(translated_frame->height(), param_count);
+  // The translated frame contains all of the register parameters
+  // plus the context.
+  CHECK_EQ(translated_frame->height(), param_count + 1);
   CHECK_GE(param_count, 0);
 
   int height_in_bytes = kPointerSize * (param_count + stack_param_count) +
@@ -1674,7 +1765,7 @@
 
   // The stub failure trampoline is a single frame.
   FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, NULL);
+      new (output_frame_size) FrameDescription(output_frame_size);
   output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
   CHECK_EQ(frame_index, 0);
   output_[frame_index] = output_frame;
@@ -1716,15 +1807,10 @@
                          "caller's constant_pool\n");
   }
 
-  // The context can be gotten from the input frame.
-  Register context_reg = StubFailureTrampolineFrame::context_register();
-  input_frame_offset -= kPointerSize;
-  value = input_->GetFrameSlot(input_frame_offset);
-  output_frame->SetRegister(context_reg.code(), value);
+  // Remember where the context will need to be written back from the deopt
+  // translation.
   output_frame_offset -= kPointerSize;
-  output_frame->SetFrameSlot(output_frame_offset, value);
-  CHECK(reinterpret_cast<Object*>(value)->IsContext());
-  DebugPrintOutputSlot(value, frame_index, output_frame_offset, "context\n");
+  unsigned context_frame_offset = output_frame_offset;
 
   // A marker value is used in place of the function.
   output_frame_offset -= kPointerSize;
@@ -1782,6 +1868,15 @@
     }
   }
 
+  Object* maybe_context = value_iterator->GetRawValue();
+  CHECK(maybe_context->IsContext());
+  Register context_reg = StubFailureTrampolineFrame::context_register();
+  value = reinterpret_cast<intptr_t>(maybe_context);
+  output_frame->SetRegister(context_reg.code(), value);
+  output_frame->SetFrameSlot(context_frame_offset, value);
+  DebugPrintOutputSlot(value, frame_index, context_frame_offset, "context\n");
+  ++value_iterator;
+
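A sketch of the deferred context write this hunk introduces: the slot offset is remembered during layout and filled in later, once the context value arrives from the deopt translation (toy frame layout, not V8's):

    #include <cstdint>
    #include <vector>

    struct Frame {
      std::vector<intptr_t> slots;
      intptr_t context_reg = 0;
    };

    int main() {
      Frame f;
      f.slots.resize(4, 0);
      unsigned context_frame_offset = 2;     // remembered during layout
      intptr_t translated_context = 0x1234;  // arrives from the translation
      f.context_reg = translated_context;
      f.slots[context_frame_offset] = translated_context;
    }
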
   // Copy constant stack parameters to the failure frame. If the number of stack
   // parameters is not known in the descriptor, the arguments object is the way
   // to access them.
@@ -1875,55 +1970,6 @@
 }
 
 
-void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
-    int frame_index, int parameter_count, int expression_count,
-    DeoptimizedFrameInfo* info) {
-  CHECK_EQ(DEBUGGER, bailout_type_);
-
-  translated_state_.Prepare(false, nullptr);
-
-  TranslatedFrame* frame = &(translated_state_.frames()[frame_index]);
-  CHECK(frame->kind() == TranslatedFrame::kFunction);
-  int frame_arg_count = frame->shared_info()->internal_formal_parameter_count();
-
-  // The height is #expressions + 1 for context.
-  CHECK_EQ(expression_count + 1, frame->height());
-  TranslatedFrame* argument_frame = frame;
-  if (frame_index > 0) {
-    TranslatedFrame* previous_frame =
-        &(translated_state_.frames()[frame_index - 1]);
-    if (previous_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
-      argument_frame = previous_frame;
-      CHECK_EQ(parameter_count, argument_frame->height() - 1);
-    } else {
-      CHECK_EQ(frame_arg_count, parameter_count);
-    }
-  } else {
-    CHECK_EQ(frame_arg_count, parameter_count);
-  }
-
-  TranslatedFrame::iterator arg_iter = argument_frame->begin();
-  arg_iter++;  // Skip the function.
-  arg_iter++;  // Skip the receiver.
-  for (int i = 0; i < parameter_count; i++, arg_iter++) {
-    if (!arg_iter->IsMaterializedObject()) {
-      info->SetParameter(i, *(arg_iter->GetValue()));
-    }
-  }
-
-  TranslatedFrame::iterator iter = frame->begin();
-  // Skip the function, receiver, context and arguments.
-  for (int i = 0; i < frame_arg_count + 3; i++, iter++) {
-  }
-
-  for (int i = 0; i < expression_count; i++, iter++) {
-    if (!iter->IsMaterializedObject()) {
-      info->SetExpression(i, *(iter->GetValue()));
-    }
-  }
-}
-
-
 void Deoptimizer::WriteTranslatedValueToOutput(
     TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
     unsigned output_offset, const char* debug_hint_string,
@@ -1980,7 +2026,12 @@
 
 
 unsigned Deoptimizer::ComputeInputFrameSize() const {
-  unsigned fixed_size = ComputeJavascriptFixedSize(function_);
+  unsigned fixed_size = StandardFrameConstants::kFixedFrameSize;
+  if (!function_->IsSmi()) {
+    fixed_size += ComputeIncomingArgumentSize(function_->shared());
+  } else {
+    CHECK_EQ(Smi::cast(function_), Smi::FromInt(StackFrame::STUB));
+  }
   // The fp-to-sp delta already takes the context, constant pool pointer and the
   // function into account so we have to avoid double counting them.
   unsigned result = fixed_size + fp_to_sp_delta_ -
@@ -1989,39 +2040,33 @@
     unsigned stack_slots = compiled_code_->stack_slots();
     unsigned outgoing_size =
         ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
-    CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+    CHECK(result ==
+          fixed_size + (stack_slots * kPointerSize) -
+              StandardFrameConstants::kFixedFrameSize + outgoing_size);
   }
   return result;
 }
 
-
-unsigned Deoptimizer::ComputeJavascriptFixedSize(JSFunction* function) const {
+// static
+unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
   // The fixed part of the frame consists of the return address, frame
   // pointer, function, context, and all the incoming arguments.
-  return ComputeIncomingArgumentSize(function) +
+  return ComputeIncomingArgumentSize(shared) +
          StandardFrameConstants::kFixedFrameSize;
 }
 
-
-unsigned Deoptimizer::ComputeInterpretedFixedSize(JSFunction* function) const {
+// static
+unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
   // The fixed part of the frame consists of the return address, frame
   // pointer, function, context, new.target, bytecode offset and all the
   // incoming arguments.
-  return ComputeIncomingArgumentSize(function) +
+  return ComputeIncomingArgumentSize(shared) +
          InterpreterFrameConstants::kFixedFrameSize;
 }
 
-
-unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
-  // The incoming arguments is the values for formal parameters and
-  // the receiver. Every slot contains a pointer.
-  if (function->IsSmi()) {
-    CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB));
-    return 0;
-  }
-  unsigned arguments =
-      function->shared()->internal_formal_parameter_count() + 1;
-  return arguments * kPointerSize;
+// static
+unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
+  return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
 }
 
 
@@ -2079,11 +2124,9 @@
   data->deopt_entry_code_entries_[type] = entry_count;
 }
 
-
-FrameDescription::FrameDescription(uint32_t frame_size,
-                                   JSFunction* function)
+FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
     : frame_size_(frame_size),
-      function_(function),
+      parameter_count_(parameter_count),
       top_(kZapUint32),
       pc_(kZapUint32),
       fp_(kZapUint32),
@@ -2107,10 +2150,10 @@
 int FrameDescription::ComputeFixedSize() {
   if (type_ == StackFrame::INTERPRETED) {
     return InterpreterFrameConstants::kFixedFrameSize +
-           (ComputeParametersCount() + 1) * kPointerSize;
+           parameter_count() * kPointerSize;
   } else {
     return StandardFrameConstants::kFixedFrameSize +
-           (ComputeParametersCount() + 1) * kPointerSize;
+           parameter_count() * kPointerSize;
   }
 }
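Because parameter_count_ now includes the receiver (see the header change
below), the new term is byte-for-byte equivalent to the old one. For the same
two-parameter function on a 64-bit target:

    // old: (ComputeParametersCount() + 1) * 8 == (2 + 1) * 8 == 24
    // new: parameter_count() * 8             ==  3      * 8  == 24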
 
@@ -2123,54 +2166,13 @@
     return base - ((slot_index + 1) * kPointerSize);
   } else {
     // Incoming parameter.
-    int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
+    int arg_size = parameter_count() * kPointerSize;
     unsigned base = GetFrameSize() - arg_size;
     return base - ((slot_index + 1) * kPointerSize);
   }
 }
 
 
-int FrameDescription::ComputeParametersCount() {
-  switch (type_) {
-    case StackFrame::JAVA_SCRIPT:
-      return function_->shared()->internal_formal_parameter_count();
-    case StackFrame::ARGUMENTS_ADAPTOR: {
-      // Last slot contains number of incomming arguments as a smi.
-      // Can't use GetExpression(0) because it would cause infinite recursion.
-      return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
-    }
-    case StackFrame::STUB:
-      return -1;  // Minus receiver.
-    default:
-      FATAL("Unexpected stack frame type");
-      return 0;
-  }
-}
-
-
-Object* FrameDescription::GetParameter(int index) {
-  CHECK_GE(index, 0);
-  CHECK_LT(index, ComputeParametersCount());
-  // The slot indexes for incoming arguments are negative.
-  unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
-  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
-unsigned FrameDescription::GetExpressionCount() {
-  CHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
-  unsigned size = GetFrameSize() - ComputeFixedSize();
-  return size / kPointerSize;
-}
-
-
-Object* FrameDescription::GetExpression(int index) {
-  DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
-  unsigned offset = GetOffsetFromSlotIndex(index);
-  return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
 void TranslationBuffer::Add(int32_t value, Zone* zone) {
   // This wouldn't handle kMinInt correctly if it ever encountered it.
   DCHECK(value != kMinInt);
@@ -2359,14 +2361,13 @@
 
 
 void Translation::StoreJSFrameFunction() {
-  buffer_->Add(JS_FRAME_FUNCTION, zone());
+  StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
+                  StandardFrameConstants::kMarkerOffset) /
+                 kPointerSize);
 }
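The dedicated JS_FRAME_FUNCTION opcode is gone: the frame's function is now
recorded as an ordinary stack slot. A translation stack slot with index i
resolves to fp + kCallerPCOffset - i * kPointerSize, so the expression above
picks exactly the index that lands on kMarkerOffset, where a JavaScript frame
stores its function. Worked through on a 64-bit target without an embedded
constant pool (kCallerPCOffset == +8, kMarkerOffset == -16; both values are
assumptions about the standard frame layout):

    // index = (8 - (-16)) / 8 == 3
    // StoreStackSlot(3) later reads fp + 8 - 3 * 8 == fp - 16 == kMarkerOffset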
 
-
 int Translation::NumberOfOperandsFor(Opcode opcode) {
   switch (opcode) {
-    case JS_FRAME_FUNCTION:
-      return 0;
     case GETTER_STUB_FRAME:
     case SETTER_STUB_FRAME:
     case DUPLICATED_OBJECT:
@@ -2493,60 +2494,111 @@
   return new_array;
 }
 
+namespace {
 
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
-                                           int frame_index,
-                                           bool has_arguments_adaptor,
-                                           bool has_construct_stub) {
-  FrameDescription* output_frame = deoptimizer->output_[frame_index];
-  function_ = output_frame->GetFunction();
-  context_ = reinterpret_cast<Object*>(output_frame->GetContext());
-  has_construct_stub_ = has_construct_stub;
-  expression_count_ = output_frame->GetExpressionCount();
-  expression_stack_ = new Object* [expression_count_];
-  // Get the source position using the unoptimized code.
-  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
-  Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
-  source_position_ = code->SourcePosition(pc);
-
-  for (int i = 0; i < expression_count_; i++) {
-    Object* value = output_frame->GetExpression(i);
-    // Replace materialization markers with the undefined value.
-    if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
-      value = deoptimizer->isolate()->heap()->undefined_value();
+Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
+                                   Isolate* isolate) {
+  if (it->GetRawValue() == isolate->heap()->arguments_marker()) {
+    if (!it->IsMaterializableByDebugger()) {
+      return isolate->factory()->undefined_value();
     }
-    SetExpression(i, value);
   }
+  return it->GetValue();
+}
 
-  if (has_arguments_adaptor) {
-    output_frame = deoptimizer->output_[frame_index - 1];
-    CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
-  }
-
-  parameters_count_ = output_frame->ComputeParametersCount();
-  parameters_ = new Object* [parameters_count_];
-  for (int i = 0; i < parameters_count_; i++) {
-    Object* value = output_frame->GetParameter(i);
-    // Replace materialization markers with the undefined value.
-    if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
-      value = deoptimizer->isolate()->heap()->undefined_value();
-    }
-    SetParameter(i, value);
+int ComputeSourcePosition(Handle<SharedFunctionInfo> shared,
+                          BailoutId node_id) {
+  if (shared->HasBytecodeArray()) {
+    BytecodeArray* bytecodes = shared->bytecode_array();
+    return bytecodes->SourcePosition(node_id.ToInt());
+  } else {
+    Code* non_optimized_code = shared->code();
+    FixedArray* raw_data = non_optimized_code->deoptimization_data();
+    DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+    unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, *shared);
+    unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+    return non_optimized_code->SourcePosition(pc_offset);
   }
 }
 
+}  // namespace
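The helper takes the two unoptimized-code paths apart: for bytecode, the
BailoutId is itself a bytecode offset, so the position comes straight from the
BytecodeArray; for full-code, the BailoutId is first mapped through
DeoptimizationOutputData to a pc offset inside the unoptimized code, and the
position is looked up from there.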
 
-DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
-  delete[] expression_stack_;
-  delete[] parameters_;
-}
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
+                                           TranslatedState::iterator frame_it,
+                                           Isolate* isolate) {
+  // If the previous frame is an adaptor frame, we will take the parameters
+  // from there.
+  TranslatedState::iterator parameter_frame = frame_it;
+  if (parameter_frame != state->begin()) {
+    parameter_frame--;
+  }
+  int parameter_count;
+  if (parameter_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
+    parameter_count = parameter_frame->height() - 1;  // Ignore the receiver.
+  } else {
+    parameter_frame = frame_it;
+    parameter_count =
+        frame_it->shared_info()->internal_formal_parameter_count();
+  }
+  TranslatedFrame::iterator parameter_it = parameter_frame->begin();
+  parameter_it++;  // Skip the function.
+  parameter_it++;  // Skip the receiver.
 
+  // Figure out whether there is a construct stub frame on top of
+  // the parameter frame.
+  has_construct_stub_ =
+      parameter_frame != state->begin() &&
+      (parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
 
-void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(bit_cast<Object**>(&function_));
-  v->VisitPointer(&context_);
-  v->VisitPointers(parameters_, parameters_ + parameters_count_);
-  v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+  source_position_ =
+      ComputeSourcePosition(frame_it->shared_info(), frame_it->node_id());
+
+  TranslatedFrame::iterator value_it = frame_it->begin();
+  // Get the function. Note that this might materialize the function.
+  // In case the debugger mutates this value, we should deoptimize
+  // the function and remember the value in the materialized value store.
+  function_ = Handle<JSFunction>::cast(value_it->GetValue());
+
+  parameters_.resize(static_cast<size_t>(parameter_count));
+  for (int i = 0; i < parameter_count; i++) {
+    Handle<Object> parameter = GetValueForDebugger(parameter_it, isolate);
+    SetParameter(i, parameter);
+    parameter_it++;
+  }
+
+  // Skip the function, the receiver and the arguments.
+  int skip_count =
+      frame_it->shared_info()->internal_formal_parameter_count() + 2;
+  TranslatedFrame::iterator stack_it = frame_it->begin();
+  for (int i = 0; i < skip_count; i++) {
+    stack_it++;
+  }
+
+  // Get the context.
+  context_ = GetValueForDebugger(stack_it, isolate);
+  stack_it++;
+
+  // Get the expression stack.
+  int stack_height = frame_it->height();
+  if (frame_it->kind() == TranslatedFrame::kFunction ||
+      frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
+    // For full-code frames, we should not count the context.
+    // For interpreter frames, we should not count the accumulator.
+    // TODO(jarin): Clean up the indexing in translated frames.
+    stack_height--;
+  }
+  expression_stack_.resize(static_cast<size_t>(stack_height));
+  for (int i = 0; i < stack_height; i++) {
+    Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
+    SetExpression(i, expression);
+    stack_it++;
+  }
+
+  // For interpreter frames, skip the accumulator.
+  if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
+    stack_it++;
+  }
+  CHECK(stack_it == frame_it->end());
 }
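All frame contents now come back as handles, so a consumer no longer needs the
GC Iterate() hook that is deleted below. A minimal consumption sketch
(hypothetical caller, not part of this patch), using only the accessors
declared in deoptimizer.h below:

    void DumpDeoptimizedFrame(DeoptimizedFrameInfo* info) {
      info->GetFunction()->ShortPrint();
      info->GetContext()->ShortPrint();
      for (int i = 0; i < info->parameters_count(); i++) {
        info->GetParameter(i)->ShortPrint();
      }
      for (int i = 0; i < info->expression_count(); i++) {
        info->GetExpression(i)->ShortPrint();
      }
    }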
 
 
@@ -2826,6 +2878,10 @@
   }
 }
 
+bool TranslatedValue::IsMaterializableByDebugger() const {
+  // At the moment, we only allow materialization of doubles.
+  return (kind() == kDouble);
+}
 
 int TranslatedValue::GetChildrenCount() const {
   if (kind() == kCapturedObject || kind() == kArgumentsObject) {
@@ -2906,8 +2962,8 @@
     case kInterpretedFunction: {
       int parameter_count =
           raw_shared_info_->internal_formal_parameter_count() + 1;
-      // + 3 for function, context and accumulator.
-      return height_ + parameter_count + 3;
+      // + 2 for function and context.
+      return height_ + parameter_count + 2;
     }
 
     case kGetter:
@@ -3058,7 +3114,6 @@
     case Translation::BOOL_STACK_SLOT:
     case Translation::DOUBLE_STACK_SLOT:
     case Translation::LITERAL:
-    case Translation::JS_FRAME_FUNCTION:
       break;
   }
   FATAL("We should never get here - unexpected deopt info.");
@@ -3261,16 +3316,6 @@
 
       return TranslatedValue::NewTagged(this, value);
     }
-
-    case Translation::JS_FRAME_FUNCTION: {
-      int slot_offset = JavaScriptFrameConstants::kFunctionOffset;
-      intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
-      if (trace_file != nullptr) {
-        PrintF(trace_file, "0x%08" V8PRIxPTR " ; (frame function) ", value);
-        reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
-      }
-      return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
-    }
   }
 
   FATAL("We should never get here - unexpected deopt info.");
@@ -3385,7 +3430,7 @@
 Handle<Object> TranslatedState::MaterializeAt(int frame_index,
                                               int* value_index) {
   TranslatedFrame* frame = &(frames_[frame_index]);
-  DCHECK(static_cast<size_t>(*value_index) < frame->values_.size());
+  CHECK(static_cast<size_t>(*value_index) < frame->values_.size());
 
   TranslatedValue* slot = &(frame->values_[*value_index]);
   (*value_index)++;
@@ -3541,16 +3586,16 @@
       TranslatedState::ObjectPosition pos = object_positions_[object_index];
 
       // Make sure the duplicate is referring to a previous object.
-      DCHECK(pos.frame_index_ < frame_index ||
-             (pos.frame_index_ == frame_index &&
-              pos.value_index_ < *value_index - 1));
+      CHECK(pos.frame_index_ < frame_index ||
+            (pos.frame_index_ == frame_index &&
+             pos.value_index_ < *value_index - 1));
 
       Handle<Object> object =
           frames_[pos.frame_index_].values_[pos.value_index_].GetValue();
 
       // The object should have a (non-sentinel) value.
-      DCHECK(!object.is_null() &&
-             !object.is_identical_to(isolate_->factory()->arguments_marker()));
+      CHECK(!object.is_null() &&
+            !object.is_identical_to(isolate_->factory()->arguments_marker()));
 
       slot->value_ = object;
       return object;
@@ -3583,7 +3628,7 @@
     // recursive functions!)
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(frames_[frame_index].front().GetValue());
-    *result = Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+    *result = Accessors::FunctionGetArguments(function);
     return true;
   } else {
     TranslatedFrame* previous_frame = &(frames_[frame_index]);
@@ -3615,7 +3660,8 @@
 TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
     int jsframe_index, int* args_count) {
   for (size_t i = 0; i < frames_.size(); i++) {
-    if (frames_[i].kind() == TranslatedFrame::kFunction) {
+    if (frames_[i].kind() == TranslatedFrame::kFunction ||
+        frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
       if (jsframe_index > 0) {
         jsframe_index--;
       } else {
@@ -3654,7 +3700,7 @@
     new_store = true;
   }
 
-  DCHECK_EQ(length, previously_materialized_objects->length());
+  CHECK_EQ(length, previously_materialized_objects->length());
 
   bool value_changed = false;
   for (int i = 0; i < length; i++) {
@@ -3662,7 +3708,7 @@
     TranslatedValue* value_info =
         &(frames_[pos.frame_index_].values_[pos.value_index_]);
 
-    DCHECK(value_info->IsMaterializedObject());
+    CHECK(value_info->IsMaterializedObject());
 
     Handle<Object> value(value_info->GetRawValue(), isolate_);
 
@@ -3671,14 +3717,15 @@
         previously_materialized_objects->set(i, *value);
         value_changed = true;
       } else {
-        DCHECK(previously_materialized_objects->get(i) == *value);
+        CHECK(previously_materialized_objects->get(i) == *value);
       }
     }
   }
   if (new_store && value_changed) {
     materialized_store->Set(stack_frame_pointer_,
                             previously_materialized_objects);
-    DCHECK_EQ(TranslatedFrame::kFunction, frames_[0].kind());
+    CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
+          frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
     Object* const function = frames_[0].front().GetRawValue();
     Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
   }
@@ -3697,7 +3744,7 @@
   Handle<Object> marker = isolate_->factory()->arguments_marker();
 
   int length = static_cast<int>(object_positions_.size());
-  DCHECK_EQ(length, previously_materialized_objects->length());
+  CHECK_EQ(length, previously_materialized_objects->length());
 
   for (int i = 0; i < length; i++) {
     // For a previously materialized objects, inject their value into the
@@ -3706,7 +3753,7 @@
       TranslatedState::ObjectPosition pos = object_positions_[i];
       TranslatedValue* value_info =
           &(frames_[pos.frame_index_].values_[pos.value_index_]);
-      DCHECK(value_info->IsMaterializedObject());
+      CHECK(value_info->IsMaterializedObject());
 
       value_info->value_ =
           Handle<Object>(previously_materialized_objects->get(i), isolate_);
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 10685b6..0259f01 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -27,6 +27,7 @@
   Handle<Object> GetValue();
 
   bool IsMaterializedObject() const;
+  bool IsMaterializableByDebugger() const;
 
  private:
   friend class TranslatedState;
@@ -128,6 +129,11 @@
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   int height() const { return height_; }
 
+  SharedFunctionInfo* raw_shared_info() const {
+    CHECK_NOT_NULL(raw_shared_info_);
+    return raw_shared_info_;
+  }
+
   class iterator {
    public:
     iterator& operator++() {
@@ -503,10 +509,6 @@
 
   void MaterializeHeapObjects(JavaScriptFrameIterator* it);
 
-  void MaterializeHeapNumbersForDebuggerInspectableFrame(
-      int frame_index, int parameter_count, int expression_count,
-      DeoptimizedFrameInfo* info);
-
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
 
@@ -592,8 +594,8 @@
   void DeleteFrameDescriptions();
 
   void DoComputeOutputFrames();
-  void DoComputeJSFrame(int frame_index);
-  void DoComputeInterpretedFrame(int frame_index);
+  void DoComputeJSFrame(int frame_index, bool goto_catch_handler);
+  void DoComputeInterpretedFrame(int frame_index, bool goto_catch_handler);
   void DoComputeArgumentsAdaptorFrame(int frame_index);
   void DoComputeConstructStubFrame(int frame_index);
   void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
@@ -611,10 +613,10 @@
                             const char* debug_hint_string);
 
   unsigned ComputeInputFrameSize() const;
-  unsigned ComputeJavascriptFixedSize(JSFunction* function) const;
-  unsigned ComputeInterpretedFixedSize(JSFunction* function) const;
+  static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
+  static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
 
-  unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
+  static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
   static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
 
   Object* ComputeLiteral(int index) const;
@@ -640,11 +642,6 @@
   // searching all code objects).
   Code* FindDeoptimizingCode(Address addr);
 
-  // Fill the input from from a JavaScript frame. This is used when
-  // the debugger needs to inspect an optimized frame. For normal
-  // deoptimizations the input frame is filled in generated code.
-  void FillInputFrame(Address tos, JavaScriptFrame* frame);
-
   // Fill the given output frame's registers to contain the failure handler
   // address and the number of parameters for a stub failure trampoline.
   void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
@@ -656,7 +653,7 @@
 
   // Determines whether the input frame contains alignment padding by looking
   // at the dynamic alignment state slot inside the frame.
-  bool HasAlignmentPadding(JSFunction* function);
+  bool HasAlignmentPadding(SharedFunctionInfo* shared);
 
   Isolate* isolate_;
   JSFunction* function_;
@@ -666,6 +663,9 @@
   Address from_;
   int fp_to_sp_delta_;
   int has_alignment_padding_;
+  bool deoptimizing_throw_;
+  int catch_handler_data_;
+  int catch_handler_pc_offset_;
 
   // Input frame description.
   FrameDescription* input_;
@@ -736,8 +736,7 @@
 
 class FrameDescription {
  public:
-  FrameDescription(uint32_t frame_size,
-                   JSFunction* function);
+  explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);
 
   void* operator new(size_t size, uint32_t frame_size) {
     // Subtracts kPointerSize, as the member frame_content_ already supplies
@@ -758,8 +757,6 @@
     return static_cast<uint32_t>(frame_size_);
   }
 
-  JSFunction* GetFunction() const { return function_; }
-
   unsigned GetOffsetFromSlotIndex(int slot_index);
 
   intptr_t GetFrameSlot(unsigned offset) {
@@ -767,8 +764,7 @@
   }
 
   Address GetFramePointerAddress() {
-    int fp_offset = GetFrameSize() -
-                    (ComputeParametersCount() + 1) * kPointerSize -
+    int fp_offset = GetFrameSize() - parameter_count() * kPointerSize -
                     StandardFrameConstants::kCallerSPOffset;
     return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
   }
@@ -826,17 +822,8 @@
   StackFrame::Type GetFrameType() const { return type_; }
   void SetFrameType(StackFrame::Type type) { type_ = type; }
 
-  // Get the incoming arguments count.
-  int ComputeParametersCount();
-
-  // Get a parameter value for an unoptimized frame.
-  Object* GetParameter(int index);
-
-  // Get the expression stack height for a unoptimized frame.
-  unsigned GetExpressionCount();
-
-  // Get the expression stack value for an unoptimized frame.
-  Object* GetExpression(int index);
+  // Argument count, including receiver.
+  int parameter_count() { return parameter_count_; }
 
   static int registers_offset() {
     return OFFSET_OF(FrameDescription, register_values_.registers_);
@@ -869,7 +856,7 @@
   // keep the variable-size array frame_content_ of type intptr_t at
   // the end of the structure aligned.
   uintptr_t frame_size_;  // Number of bytes.
-  JSFunction* function_;
+  int parameter_count_;
   RegisterValues register_values_;
   intptr_t top_;
   intptr_t pc_;
@@ -902,15 +889,11 @@
   explicit DeoptimizerData(MemoryAllocator* allocator);
   ~DeoptimizerData();
 
-  void Iterate(ObjectVisitor* v);
-
  private:
   MemoryAllocator* allocator_;
   int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
   MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
 
-  DeoptimizedFrameInfo* deoptimized_frame_info_;
-
   Deoptimizer* current_;
 
   friend class Deoptimizer;
@@ -953,7 +936,6 @@
   int index_;
 };
 
-
 #define TRANSLATION_OPCODE_LIST(V) \
   V(BEGIN)                         \
   V(JS_FRAME)                      \
@@ -976,9 +958,7 @@
   V(UINT32_STACK_SLOT)             \
   V(BOOL_STACK_SLOT)               \
   V(DOUBLE_STACK_SLOT)             \
-  V(LITERAL)                       \
-  V(JS_FRAME_FUNCTION)
-
+  V(LITERAL)
 
 class Translation BASE_EMBEDDED {
  public:
@@ -1071,28 +1051,20 @@
 // formal parameter count.
 class DeoptimizedFrameInfo : public Malloced {
  public:
-  DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
-                       int frame_index,
-                       bool has_arguments_adaptor,
-                       bool has_construct_stub);
-  virtual ~DeoptimizedFrameInfo();
-
-  // GC support.
-  void Iterate(ObjectVisitor* v);
+  DeoptimizedFrameInfo(TranslatedState* state,
+                       TranslatedState::iterator frame_it, Isolate* isolate);
 
   // Return the number of incoming arguments.
-  int parameters_count() { return parameters_count_; }
+  int parameters_count() { return static_cast<int>(parameters_.size()); }
 
   // Return the height of the expression stack.
-  int expression_count() { return expression_count_; }
+  int expression_count() { return static_cast<int>(expression_stack_.size()); }
 
   // Get the frame function.
-  JSFunction* GetFunction() {
-    return function_;
-  }
+  Handle<JSFunction> GetFunction() { return function_; }
 
   // Get the frame context.
-  Object* GetContext() { return context_; }
+  Handle<Object> GetContext() { return context_; }
 
   // Check if this frame is preceded by a construct stub frame.  The bottom-most
   // inlined frame might still be called by an uninlined construct stub.
@@ -1101,13 +1073,13 @@
   }
 
   // Get an incoming argument.
-  Object* GetParameter(int index) {
+  Handle<Object> GetParameter(int index) {
     DCHECK(0 <= index && index < parameters_count());
     return parameters_[index];
   }
 
   // Get an expression from the expression stack.
-  Object* GetExpression(int index) {
+  Handle<Object> GetExpression(int index) {
     DCHECK(0 <= index && index < expression_count());
     return expression_stack_[index];
   }
@@ -1118,24 +1090,22 @@
 
  private:
   // Set an incoming argument.
-  void SetParameter(int index, Object* obj) {
+  void SetParameter(int index, Handle<Object> obj) {
     DCHECK(0 <= index && index < parameters_count());
     parameters_[index] = obj;
   }
 
   // Set an expression on the expression stack.
-  void SetExpression(int index, Object* obj) {
+  void SetExpression(int index, Handle<Object> obj) {
     DCHECK(0 <= index && index < expression_count());
     expression_stack_[index] = obj;
   }
 
-  JSFunction* function_;
-  Object* context_;
+  Handle<JSFunction> function_;
+  Handle<Object> context_;
   bool has_construct_stub_;
-  int parameters_count_;
-  int expression_count_;
-  Object** parameters_;
-  Object** expression_stack_;
+  std::vector<Handle<Object> > parameters_;
+  std::vector<Handle<Object> > expression_stack_;
   int source_position_;
 
   friend class Deoptimizer;
diff --git a/src/elements-kind.cc b/src/elements-kind.cc
index 0d29c30..7bb75c4 100644
--- a/src/elements-kind.cc
+++ b/src/elements-kind.cc
@@ -37,7 +37,12 @@
     case DICTIONARY_ELEMENTS:
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
       return kPointerSizeLog2;
+    case NO_ELEMENTS:
+      UNREACHABLE();
+      return 0;
   }
   UNREACHABLE();
   return 0;
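The two string-wrapper kinds join the pointer-sized bucket: their backing
stores are ordinary FixedArrays, so byte offsets are still
index << kPointerSizeLog2 (3 on 64-bit targets, 2 on 32-bit). NO_ELEMENTS
objects have no backing store at all, hence the UNREACHABLE().

    // e.g. 64-bit: element 5 of a FAST_STRING_WRAPPER_ELEMENTS backing store
    // sits at byte offset 5 << 3 == 40 within the FixedArray payload.
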
diff --git a/src/elements-kind.h b/src/elements-kind.h
index 5f6cd62..3ebc9ad 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -30,10 +30,16 @@
   // The "slow" kind.
   DICTIONARY_ELEMENTS,
 
+  // Elements kind of the "arguments" object (only in sloppy mode).
   FAST_SLOPPY_ARGUMENTS_ELEMENTS,
   SLOW_SLOPPY_ARGUMENTS_ELEMENTS,
 
-  // Fixed typed arrays
+  // For string wrapper objects ("new String('...')"), the string's characters
+  // are overlaid onto a regular elements backing store.
+  FAST_STRING_WRAPPER_ELEMENTS,
+  SLOW_STRING_WRAPPER_ELEMENTS,
+
+  // Fixed typed arrays.
   UINT8_ELEMENTS,
   INT8_ELEMENTS,
   UINT16_ELEMENTS,
@@ -44,7 +50,10 @@
   FLOAT64_ELEMENTS,
   UINT8_CLAMPED_ELEMENTS,
 
-  // Derived constants from ElementsKind
+  // Sentinel ElementsKind for objects with no elements.
+  NO_ELEMENTS,
+
+  // Derived constants from ElementsKind.
   FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
   LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
   FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
@@ -83,6 +92,10 @@
          kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
 }
 
+inline bool IsStringWrapperElementsKind(ElementsKind kind) {
+  return kind == FAST_STRING_WRAPPER_ELEMENTS ||
+         kind == SLOW_STRING_WRAPPER_ELEMENTS;
+}
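An illustrative picture of how the two kinds behave (JS shown in comments;
the transition itself is implemented in elements.cc below):

    // var s = new String("ab");   // starts as FAST_STRING_WRAPPER_ELEMENTS
    // s[0], s[1]                  // "a", "b": served from the string itself
    // s[10] = 1;                  // stored in the overlaid backing store;
    //                             // dictionary-mode stores use the SLOW kind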
 
 inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
   return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
@@ -104,7 +117,8 @@
 
 inline bool IsTransitionElementsKind(ElementsKind kind) {
   return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind) ||
-         kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+         kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
+         kind == FAST_STRING_WRAPPER_ELEMENTS;
 }
 
 
diff --git a/src/elements.cc b/src/elements.cc
index d4d80db..9fd450a 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -40,7 +40,9 @@
 //   - SloppyArgumentsElementsAccessor
 //     - FastSloppyArgumentsElementsAccessor
 //     - SlowSloppyArgumentsElementsAccessor
-
+//   - StringWrapperElementsAccessor
+//     - FastStringWrapperElementsAccessor
+//     - SlowStringWrapperElementsAccessor
 
 namespace v8 {
 namespace internal {
@@ -72,6 +74,10 @@
     FixedArray)                                                               \
   V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS,      \
     FixedArray)                                                               \
+  V(FastStringWrapperElementsAccessor, FAST_STRING_WRAPPER_ELEMENTS,          \
+    FixedArray)                                                               \
+  V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS,          \
+    FixedArray)                                                               \
   V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array)              \
   V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array)                 \
   V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array)           \
@@ -83,7 +89,6 @@
   V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS,                \
     FixedUint8ClampedArray)
 
-
 template<ElementsKind Kind> class ElementsKindTraits {
  public:
   typedef FixedArrayBase BackingStore;
@@ -134,11 +139,14 @@
   if (copy_size == 0) return;
   FixedArray* from = FixedArray::cast(from_base);
   FixedArray* to = FixedArray::cast(to_base);
-  DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
+  DCHECK(IsFastSmiOrObjectElementsKind(from_kind) ||
+         from_kind == FAST_STRING_WRAPPER_ELEMENTS);
   DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
 
   WriteBarrierMode write_barrier_mode =
-      (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
+      ((IsFastObjectElementsKind(from_kind) &&
+        IsFastObjectElementsKind(to_kind)) ||
+       from_kind == FAST_STRING_WRAPPER_ELEMENTS)
           ? UPDATE_WRITE_BARRIER
           : SKIP_WRITE_BARRIER;
   for (int i = 0; i < copy_size; i++) {
@@ -230,15 +238,16 @@
   Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
   Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
 
-  // create an outer loop to not waste too much time on creating HandleScopes
-  // on the other hand we might overflow a single handle scope depending on
-  // the copy_size
+  // Use an outer loop to not waste too much time on creating HandleScopes.
+  // On the other hand we might overflow a single handle scope depending on
+  // the copy_size.
   int offset = 0;
   while (offset < copy_size) {
     HandleScope scope(isolate);
     offset += 100;
     for (int i = offset - 100; i < offset && i < copy_size; ++i) {
-      Handle<Object> value = FixedDoubleArray::get(from, i + from_start);
+      Handle<Object> value =
+          FixedDoubleArray::get(*from, i + from_start, isolate);
       to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
     }
   }
@@ -545,30 +554,32 @@
                *holder, *backing_store, index, filter) != kMaxUInt32;
   }
 
-  Handle<Object> Get(Handle<FixedArrayBase> backing_store,
-                     uint32_t entry) final {
-    return ElementsAccessorSubclass::GetImpl(backing_store, entry);
+  bool HasAccessors(JSObject* holder) final {
+    return ElementsAccessorSubclass::HasAccessorsImpl(holder,
+                                                      holder->elements());
   }
 
-  static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
-                                uint32_t entry) {
-    uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
-    return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
+  static bool HasAccessorsImpl(JSObject* holder,
+                               FixedArrayBase* backing_store) {
+    return false;
   }
 
-  void Set(FixedArrayBase* backing_store, uint32_t entry, Object* value) final {
-    ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
+  Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
+    return ElementsAccessorSubclass::GetImpl(holder, entry);
   }
 
-  static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
-                             Object* value) {
-    UNREACHABLE();
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    return ElementsAccessorSubclass::GetImpl(holder->elements(), entry);
   }
 
+  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+    Isolate* isolate = backing_store->GetIsolate();
+    uint32_t index = GetIndexForEntryImpl(backing_store, entry);
+    return handle(BackingStore::cast(backing_store)->get(index), isolate);
+  }
 
-  static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
-                             Object* value, WriteBarrierMode mode) {
-    UNREACHABLE();
+  void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
+    ElementsAccessorSubclass::SetImpl(holder, entry, value);
   }
 
   void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
@@ -780,6 +791,7 @@
     DCHECK(IsFastDoubleElementsKind(from_kind) !=
                IsFastDoubleElementsKind(kind()) ||
            IsDictionaryElementsKind(from_kind) ||
+           from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
            static_cast<uint32_t>(old_elements->length()) < capacity);
     Handle<FixedArrayBase> elements =
         ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
@@ -814,21 +826,6 @@
     UNREACHABLE();
   }
 
-  void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
-                    ElementsKind from_kind, Handle<FixedArrayBase> to,
-                    uint32_t to_start, int copy_size) final {
-    DCHECK(!from.is_null());
-    // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
-    // violate the handlified function signature convention:
-    // raw pointer parameters in the function that allocates. This is done
-    // intentionally to avoid ArrayConcat() builtin performance degradation.
-    // See the comment in another ElementsAccessorBase::CopyElements() for
-    // details.
-    ElementsAccessorSubclass::CopyElementsImpl(*from, from_start, *to,
-                                               from_kind, to_start,
-                                               kPackedSizeNotKnown, copy_size);
-  }
-
   void CopyElements(JSObject* from_holder, uint32_t from_start,
                     ElementsKind from_kind, Handle<FixedArrayBase> to,
                     uint32_t to_start, int copy_size) final {
@@ -861,6 +858,7 @@
                                         KeyAccumulator* keys, uint32_t range,
                                         PropertyFilter filter,
                                         uint32_t offset) {
+    DCHECK_NE(DICTIONARY_ELEMENTS, kind());
     if (filter & ONLY_ALL_CAN_READ) {
       // Non-dictionary elements can't have all-can-read accessors.
       return;
@@ -875,8 +873,9 @@
     if (range < length) length = range;
     for (uint32_t i = offset; i < length; i++) {
       if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
-                                                    filter))
+                                                    filter)) {
         continue;
+      }
       keys->AddKey(i);
     }
   }
@@ -892,19 +891,8 @@
   void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
                                    KeyAccumulator* accumulator,
                                    AddKeyConversion convert) final {
-    Handle<FixedArrayBase> from(receiver->elements());
-    uint32_t add_length =
-        ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
-    if (add_length == 0) return;
-
-    for (uint32_t i = 0; i < add_length; i++) {
-      if (!ElementsAccessorSubclass::HasEntryImpl(*from, i)) continue;
-      Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, i);
-      DCHECK(!value->IsTheHole());
-      DCHECK(!value->IsAccessorPair());
-      DCHECK(!value->IsExecutableAccessorInfo());
-      accumulator->AddKey(value, convert);
-    }
+    ElementsAccessorSubclass::AddElementsToKeyAccumulatorImpl(
+        receiver, accumulator, convert);
   }
 
   static uint32_t GetCapacityImpl(JSObject* holder,
@@ -916,10 +904,6 @@
     return ElementsAccessorSubclass::GetCapacityImpl(holder, backing_store);
   }
 
-  static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
-    return true;
-  }
-
   static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
                                        uint32_t entry) {
     return entry;
@@ -956,9 +940,12 @@
     return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
   }
 
-  PropertyDetails GetDetails(FixedArrayBase* backing_store,
-                             uint32_t entry) final {
-    return ElementsAccessorSubclass::GetDetailsImpl(backing_store, entry);
+  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+    return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+  }
+
+  PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
+    return ElementsAccessorSubclass::GetDetailsImpl(holder, entry);
   }
 
  private:
@@ -1048,20 +1035,42 @@
     obj->set_elements(*new_elements);
   }
 
+  static bool HasAccessorsImpl(JSObject* holder,
+                               FixedArrayBase* backing_store) {
+    SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+    if (!dict->requires_slow_elements()) return false;
+    int capacity = dict->Capacity();
+    for (int i = 0; i < capacity; i++) {
+      Object* key = dict->KeyAt(i);
+      if (!dict->IsKey(key)) continue;
+      DCHECK(!dict->IsDeleted(i));
+      PropertyDetails details = dict->DetailsAt(i);
+      if (details.type() == ACCESSOR_CONSTANT) return true;
+    }
+    return false;
+  }
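requires_slow_elements() acts as a cheap pre-filter: fast-kind stores can
never hold accessors, and a dictionary only needs the linear scan when it was
explicitly marked slow. A hypothetical call site (not in this patch):

    ElementsAccessor* accessor = object->GetElementsAccessor();
    if (accessor->HasAccessors(*object)) {
      // At least one element has ACCESSOR_CONSTANT details; take a slow path.
    }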
+
   static Object* GetRaw(FixedArrayBase* store, uint32_t entry) {
     SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
     return backing_store->ValueAt(entry);
   }
 
-  static Handle<Object> GetImpl(Handle<FixedArrayBase> store, uint32_t entry) {
-    Isolate* isolate = store->GetIsolate();
-    return handle(GetRaw(*store, entry), isolate);
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    return GetImpl(holder->elements(), entry);
   }
 
-  static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
+  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+    return handle(GetRaw(backing_store, entry), backing_store->GetIsolate());
+  }
+
+  static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
                              Object* value) {
-    SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
-    dictionary->ValueAtPut(entry, value);
+    SetImpl(holder->elements(), entry, value);
+  }
+
+  static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+                             Object* value) {
+    SeededNumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
   }
 
   static void ReconfigureImpl(Handle<JSObject> object,
@@ -1082,7 +1091,7 @@
                       uint32_t new_capacity) {
     PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
     Handle<SeededNumberDictionary> dictionary =
-        object->HasFastElements()
+        object->HasFastElements() || object->HasFastStringWrapperElements()
             ? JSObject::NormalizeElements(object)
             : handle(SeededNumberDictionary::cast(object->elements()));
     Handle<SeededNumberDictionary> new_dictionary =
@@ -1123,6 +1132,10 @@
     return static_cast<uint32_t>(entry);
   }
 
+  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+    return GetDetailsImpl(holder->elements(), entry);
+  }
+
   static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
                                         uint32_t entry) {
     return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
@@ -1159,6 +1172,24 @@
 
     keys->SortCurrentElementsList();
   }
+
+  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+                                              KeyAccumulator* accumulator,
+                                              AddKeyConversion convert) {
+    SeededNumberDictionary* dictionary =
+        SeededNumberDictionary::cast(receiver->elements());
+    int capacity = dictionary->Capacity();
+    for (int i = 0; i < capacity; i++) {
+      Object* k = dictionary->KeyAt(i);
+      if (!dictionary->IsKey(k)) continue;
+      if (dictionary->IsDeleted(i)) continue;
+      Object* value = dictionary->ValueAt(i);
+      DCHECK(!value->IsTheHole());
+      DCHECK(!value->IsAccessorPair());
+      DCHECK(!value->IsAccessorInfo());
+      accumulator->AddKey(value, convert);
+    }
+  }
 };
 
 
@@ -1197,9 +1228,9 @@
 
   static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
                            Handle<FixedArrayBase> store) {
-    DCHECK(obj->HasFastSmiOrObjectElements() ||
-           obj->HasFastDoubleElements() ||
-           obj->HasFastArgumentsElements());
+    DCHECK(obj->HasFastSmiOrObjectElements() || obj->HasFastDoubleElements() ||
+           obj->HasFastArgumentsElements() ||
+           obj->HasFastStringWrapperElements());
     Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
     if (!obj->IsJSArray() &&
         entry == static_cast<uint32_t>(store->length()) - 1) {
@@ -1277,7 +1308,7 @@
       FastElementsAccessorSubclass::GrowCapacityAndConvertImpl(object,
                                                                new_capacity);
     } else {
-      if (from_kind != to_kind) {
+      if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
         JSObject::TransitionElementsKind(object, to_kind);
       }
       if (IsFastSmiOrObjectElementsKind(from_kind)) {
@@ -1285,7 +1316,7 @@
         JSObject::EnsureWritableFastElements(object);
       }
     }
-    FastElementsAccessorSubclass::SetImpl(object->elements(), index, *value);
+    FastElementsAccessorSubclass::SetImpl(object, index, *value);
   }
 
   static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -1303,6 +1334,27 @@
     return !BackingStore::cast(backing_store)->is_the_hole(entry);
   }
 
+  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+                                              KeyAccumulator* accumulator,
+                                              AddKeyConversion convert) {
+    uint32_t length = 0;
+    Handle<FixedArrayBase> elements(receiver->elements(),
+                                    receiver->GetIsolate());
+    if (receiver->IsJSArray()) {
+      length = Smi::cast(JSArray::cast(*receiver)->length())->value();
+    } else {
+      length =
+          FastElementsAccessorSubclass::GetCapacityImpl(*receiver, *elements);
+    }
+    for (uint32_t i = 0; i < length; i++) {
+      if (IsFastPackedElementsKind(KindTraits::Kind) ||
+          HasEntryImpl(*elements, i)) {
+        accumulator->AddKey(FastElementsAccessorSubclass::GetImpl(*elements, i),
+                            convert);
+      }
+    }
+  }
+
   static void ValidateContents(Handle<JSObject> holder, int length) {
 #if DEBUG
     Isolate* isolate = holder->GetIsolate();
@@ -1320,7 +1372,7 @@
     Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
     if (IsFastSmiElementsKind(KindTraits::Kind)) {
       for (int i = 0; i < length; i++) {
-        DCHECK(BackingStore::get(backing_store, i)->IsSmi() ||
+        DCHECK(BackingStore::get(*backing_store, i, isolate)->IsSmi() ||
                (IsFastHoleyElementsKind(KindTraits::Kind) &&
                 backing_store->is_the_hole(i)));
       }
@@ -1480,7 +1532,7 @@
     int new_length = length - 1;
     int remove_index = remove_position == AT_START ? 0 : new_length;
     Handle<Object> result =
-        FastElementsAccessorSubclass::GetImpl(backing_store, remove_index);
+        FastElementsAccessorSubclass::GetImpl(*backing_store, remove_index);
     if (remove_position == AT_START) {
       FastElementsAccessorSubclass::MoveElements(
           isolate, receiver, backing_store, 0, 1, new_length, 0, 0);
@@ -1557,6 +1609,11 @@
       : FastElementsAccessor<FastElementsAccessorSubclass,
                              KindTraits>(name) {}
 
+  static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+                             Object* value) {
+    SetImpl(holder->elements(), entry, value);
+  }
+
   static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
                              Object* value) {
     FixedArray::cast(backing_store)->set(entry, value);
@@ -1613,6 +1670,7 @@
       case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_HOLEY_ELEMENTS:
+      case FAST_STRING_WRAPPER_ELEMENTS:
         CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
                                    to_start, copy_size);
         break;
@@ -1624,17 +1682,21 @@
         break;
       }
       case DICTIONARY_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
         CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
                                        copy_size);
         break;
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
-      case TYPE##_ELEMENTS:                                                   \
-        UNREACHABLE();
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
+      // This function is currently only used for JSArrays with non-zero
+      // length.
+      UNREACHABLE();
+      break;
+      case NO_ELEMENTS:
+        break;  // Nothing to do.
     }
   }
 };
@@ -1697,6 +1759,21 @@
       : FastElementsAccessor<FastElementsAccessorSubclass,
                              KindTraits>(name) {}
 
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    return GetImpl(holder->elements(), entry);
+  }
+
+  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+    Isolate* isolate = backing_store->GetIsolate();
+    return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
+                                 isolate);
+  }
+
+  static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+                             Object* value) {
+    SetImpl(holder->elements(), entry, value);
+  }
+
   static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
                              Object* value) {
     FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
@@ -1759,13 +1836,16 @@
         break;
       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
-      case TYPE##_ELEMENTS:                                                   \
-        UNREACHABLE();
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+      case NO_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
+      // This function is currently only used for JSArrays with non-zero
+      // length.
+      UNREACHABLE();
+      break;
     }
   }
 };
@@ -1808,6 +1888,11 @@
   typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
   typedef TypedElementsAccessor<Kind> AccessorClass;
 
+  static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+                             Object* value) {
+    SetImpl(holder->elements(), entry, value);
+  }
+
   static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
                              Object* value) {
     BackingStore::cast(backing_store)->SetValue(entry, value);
@@ -1818,10 +1903,16 @@
     BackingStore::cast(backing_store)->SetValue(entry, value);
   }
 
-  static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
-                                uint32_t entry) {
-    uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
-    return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    return GetImpl(holder->elements(), entry);
+  }
+
+  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+    return BackingStore::get(BackingStore::cast(backing_store), entry);
+  }
+
+  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+    return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
   }
 
   static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1829,6 +1920,17 @@
     return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
   }
 
+  static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+                             Handle<FixedArrayBase> backing_store,
+                             PropertyFilter filter) {
+    return index < AccessorClass::GetCapacityImpl(*holder, *backing_store);
+  }
+
+  static bool HasAccessorsImpl(JSObject* holder,
+                               FixedArrayBase* backing_store) {
+    return false;
+  }
+
   static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
                             uint32_t length,
                             Handle<FixedArrayBase> backing_store) {
@@ -1859,6 +1961,18 @@
     if (view->WasNeutered()) return 0;
     return backing_store->length();
   }
+
+  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+                                              KeyAccumulator* accumulator,
+                                              AddKeyConversion convert) {
+    Handle<FixedArrayBase> elements(receiver->elements(),
+                                    receiver->GetIsolate());
+    uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
+    for (uint32_t i = 0; i < length; i++) {
+      Handle<Object> value = AccessorClass::GetImpl(*elements, i);
+      accumulator->AddKey(value, convert);
+    }
+  }
 };
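Typed arrays have no holes, so every index below the capacity is a valid key;
and since GetCapacityImpl above returns 0 once the underlying buffer has been
neutered, a detached view simply contributes no keys.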
 
 
@@ -1883,10 +1997,13 @@
     USE(KindTraits::Kind);
   }
 
-  static Handle<Object> GetImpl(Handle<FixedArrayBase> parameters,
-                                uint32_t entry) {
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    return GetImpl(holder->elements(), entry);
+  }
+
+  static Handle<Object> GetImpl(FixedArrayBase* parameters, uint32_t entry) {
     Isolate* isolate = parameters->GetIsolate();
-    Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+    Handle<FixedArray> parameter_map(FixedArray::cast(parameters), isolate);
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
       DisallowHeapAllocation no_gc;
@@ -1897,10 +2014,8 @@
       return handle(context->get(context_entry), isolate);
     } else {
       // Object is not mapped, defer to the arguments.
-      Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
-                                   isolate);
-      Handle<Object> result =
-          ArgumentsAccessor::GetImpl(arguments, entry - length);
+      Handle<Object> result = ArgumentsAccessor::GetImpl(
+          FixedArray::cast(parameter_map->get(1)), entry - length);
       // Elements of the arguments object in slow mode might be slow aliases.
       if (result->IsAliasedArgumentsEntry()) {
         DisallowHeapAllocation no_gc;
@@ -1919,6 +2034,11 @@
     UNREACHABLE();
   }
 
+  static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+                             Object* value) {
+    SetImpl(holder->elements(), entry, value);
+  }
+
   static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
                              Object* value) {
     FixedArray* parameter_map = FixedArray::cast(store);
@@ -1959,6 +2079,18 @@
            ArgumentsAccessor::GetCapacityImpl(holder, arguments);
   }
 
+  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+                                              KeyAccumulator* accumulator,
+                                              AddKeyConversion convert) {
+    FixedArrayBase* elements = receiver->elements();
+    uint32_t length = GetCapacityImpl(*receiver, elements);
+    for (uint32_t entry = 0; entry < length; entry++) {
+      if (!HasEntryImpl(elements, entry)) continue;
+      Handle<Object> value = GetImpl(elements, entry);
+      accumulator->AddKey(value, convert);
+    }
+  }
+
   static bool HasEntryImpl(FixedArrayBase* parameters, uint32_t entry) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
     uint32_t length = parameter_map->length() - 2;
@@ -1970,6 +2102,13 @@
     return ArgumentsAccessor::HasEntryImpl(arguments, entry - length);
   }
 
+  static bool HasAccessorsImpl(JSObject* holder,
+                               FixedArrayBase* backing_store) {
+    FixedArray* parameter_map = FixedArray::cast(backing_store);
+    FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+    return ArgumentsAccessor::HasAccessorsImpl(holder, arguments);
+  }
+
   static uint32_t GetIndexForEntryImpl(FixedArrayBase* parameters,
                                        uint32_t entry) {
     FixedArray* parameter_map = FixedArray::cast(parameters);
@@ -1994,9 +2133,8 @@
     return (parameter_map->length() - 2) + entry;
   }
 
-  static PropertyDetails GetDetailsImpl(FixedArrayBase* parameters,
-                                        uint32_t entry) {
-    FixedArray* parameter_map = FixedArray::cast(parameters);
+  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+    FixedArray* parameter_map = FixedArray::cast(holder->elements());
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
       return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
@@ -2201,6 +2339,172 @@
   }
 };
 
+template <typename StringWrapperElementsAccessorSubclass,
+          typename BackingStoreAccessor, typename KindTraits>
+class StringWrapperElementsAccessor
+    : public ElementsAccessorBase<StringWrapperElementsAccessorSubclass,
+                                  KindTraits> {
+ public:
+  explicit StringWrapperElementsAccessor(const char* name)
+      : ElementsAccessorBase<StringWrapperElementsAccessorSubclass, KindTraits>(
+            name) {
+    USE(KindTraits::Kind);
+  }
+
+  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+    Isolate* isolate = holder->GetIsolate();
+    Handle<String> string(GetString(*holder), isolate);
+    uint32_t length = static_cast<uint32_t>(string->length());
+    if (entry < length) {
+      return isolate->factory()->LookupSingleCharacterStringFromCode(
+          String::Flatten(string)->Get(entry));
+    }
+    return BackingStoreAccessor::GetImpl(holder, entry - length);
+  }
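Entries number the string's own characters first: entries in [0, length) are
read-only one-character strings, and anything at or beyond length is forwarded
to the regular backing store with the entry shifted down by length. Traced
through an illustrative example:

    // var s = new String("ab"); s[5] = "x";      (fast case)
    //   index 0 -> entry 0 -> "a"                (string character)
    //   index 1 -> entry 1 -> "b"
    //   index 5 -> backing-store entry 5 -> combined entry 5 + 2 == 7;
    //              GetImpl(7) forwards to the backing store at 7 - 2 == 5.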
+
+  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+    uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+    if (entry < length) {
+      PropertyAttributes attributes =
+          static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+      return PropertyDetails(attributes, v8::internal::DATA, 0,
+                             PropertyCellType::kNoCell);
+    }
+    return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
+  }
+
+  static uint32_t GetEntryForIndexImpl(JSObject* holder,
+                                       FixedArrayBase* backing_store,
+                                       uint32_t index, PropertyFilter filter) {
+    uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+    if (index < length) return index;
+    uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
+        holder, backing_store, index, filter);
+    if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
+    DCHECK(backing_store_entry < kMaxUInt32 - length);
+    return backing_store_entry + length;
+  }
+
+  static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) {
+    uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+    if (entry < length) {
+      return;  // String contents can't be deleted.
+    }
+    BackingStoreAccessor::DeleteImpl(holder, entry - length);
+  }
+
+  static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object* value) {
+    uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+    if (entry < length) {
+      return;  // String contents are read-only.
+    }
+    BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value);
+  }
+
+  static void AddImpl(Handle<JSObject> object, uint32_t index,
+                      Handle<Object> value, PropertyAttributes attributes,
+                      uint32_t new_capacity) {
+    DCHECK(index >= static_cast<uint32_t>(GetString(*object)->length()));
+    // Explicitly grow fast backing stores if needed. Dictionaries know how to
+    // extend their capacity themselves.
+    if (KindTraits::Kind == FAST_STRING_WRAPPER_ELEMENTS &&
+        (object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
+         BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
+             new_capacity)) {
+      StringWrapperElementsAccessorSubclass::GrowCapacityAndConvertImpl(
+          object, new_capacity);
+    }
+    BackingStoreAccessor::AddImpl(object, index, value, attributes,
+                                  new_capacity);
+  }
+
+  static void ReconfigureImpl(Handle<JSObject> object,
+                              Handle<FixedArrayBase> store, uint32_t entry,
+                              Handle<Object> value,
+                              PropertyAttributes attributes) {
+    uint32_t length = static_cast<uint32_t>(GetString(*object)->length());
+    if (entry < length) {
+      return;  // String contents can't be reconfigured.
+    }
+    BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value,
+                                          attributes);
+  }
+
+  static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+                                              KeyAccumulator* accumulator,
+                                              AddKeyConversion convert) {
+    Isolate* isolate = receiver->GetIsolate();
+    Handle<String> string(GetString(*receiver), isolate);
+    string = String::Flatten(string);
+    uint32_t length = static_cast<uint32_t>(string->length());
+    for (uint32_t i = 0; i < length; i++) {
+      accumulator->AddKey(
+          isolate->factory()->LookupSingleCharacterStringFromCode(
+              string->Get(i)),
+          convert);
+    }
+    BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(receiver, accumulator,
+                                                          convert);
+  }
+
+  static void CollectElementIndicesImpl(Handle<JSObject> object,
+                                        Handle<FixedArrayBase> backing_store,
+                                        KeyAccumulator* keys, uint32_t range,
+                                        PropertyFilter filter,
+                                        uint32_t offset) {
+    if ((filter & ONLY_ALL_CAN_READ) == 0) {
+      uint32_t length = GetString(*object)->length();
+      for (uint32_t i = 0; i < length; i++) {
+        keys->AddKey(i);
+      }
+    }
+    BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
+                                                    range, filter, offset);
+  }
+
+  static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+                               FixedArrayBase* to, ElementsKind from_kind,
+                               uint32_t to_start, int packed_size,
+                               int copy_size) {
+    BackingStoreAccessor::CopyElementsImpl(from, from_start, to, from_kind,
+                                           to_start, packed_size, copy_size);
+  }
+
+ private:
+  static String* GetString(JSObject* holder) {
+    DCHECK(holder->IsJSValue());
+    JSValue* js_value = JSValue::cast(holder);
+    DCHECK(js_value->value()->IsString());
+    return String::cast(js_value->value());
+  }
+};
+
+class FastStringWrapperElementsAccessor
+    : public StringWrapperElementsAccessor<
+          FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
+          ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>> {
+ public:
+  explicit FastStringWrapperElementsAccessor(const char* name)
+      : StringWrapperElementsAccessor<
+            FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
+            ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
+};
+
+class SlowStringWrapperElementsAccessor
+    : public StringWrapperElementsAccessor<
+          SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
+          ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>> {
+ public:
+  explicit SlowStringWrapperElementsAccessor(const char* name)
+      : StringWrapperElementsAccessor<
+            SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
+            ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>>(name) {}
+
+  static bool HasAccessorsImpl(JSObject* holder,
+                               FixedArrayBase* backing_store) {
+    return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store);
+  }
+};
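
Taken together, these accessors layer the wrapped string's characters in front of the ordinary element backing store: entries below the string length resolve to read-only, non-deletable characters, and all other entries are delegated to the backing store with the string length subtracted. A minimal standalone sketch of that entry split (plain C++, not V8's real classes):

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    struct StringWrapperModel {
      std::string wrapped;             // the JSValue's string payload
      std::vector<int> backing_store;  // indexed properties past the string

      // Mirrors GetImpl: characters first, then the backing store.
      int Get(std::uint32_t entry) const {
        std::uint32_t length = static_cast<std::uint32_t>(wrapped.size());
        if (entry < length) return wrapped[entry];
        return backing_store[entry - length];
      }

      // Mirrors SetImpl/DeleteImpl: writes into the character range are
      // silently ignored, since string contents are read-only.
      void Set(std::uint32_t entry, int value) {
        std::uint32_t length = static_cast<std::uint32_t>(wrapped.size());
        if (entry < length) return;
        backing_store[entry - length] = value;
      }
    };

    int main() {
      StringWrapperModel m{"ab", {42}};
      assert(m.Get(0) == 'a');  // entry 0 is the character 'a'
      m.Set(0, 99);             // ignored, like the accessor
      assert(m.Get(0) == 'a');
      assert(m.Get(2) == 42);   // entry 2 maps to backing_store[2 - length]
      std::cout << "ok" << std::endl;
    }
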
 
 }  // namespace
 
diff --git a/src/elements.h b/src/elements.h
index 71e70a1..483d753 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -29,8 +29,6 @@
     return elements_accessors_[elements_kind];
   }
 
-  static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
-
   // Checks the elements of an object for consistency, asserting when a problem
   // is found.
   virtual void Validate(Handle<JSObject> obj) = 0;
@@ -59,8 +57,10 @@
                         Handle<FixedArrayBase> backing_store, uint32_t start,
                         uint32_t end) = 0;
 
-  virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
-                             uint32_t entry) = 0;
+  virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
+
+  virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+  virtual bool HasAccessors(JSObject* holder) = 0;
 
   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
@@ -81,38 +81,6 @@
   // destination array with the hole.
   static const int kCopyToEndAndInitializeToHole = -2;
 
-  // Copy elements from one backing store to another. Typically, callers specify
-  // the source JSObject or JSArray in source_holder. If the holder's backing
-  // store is available, it can be passed in source and source_holder is
-  // ignored.
-  virtual void CopyElements(
-      Handle<FixedArrayBase> source,
-      uint32_t source_start,
-      ElementsKind source_kind,
-      Handle<FixedArrayBase> destination,
-      uint32_t destination_start,
-      int copy_size) = 0;
-
-  // NOTE: this method violates the handlified function signature convention:
-  // raw pointer parameter |source_holder| in the function that allocates.
-  // This is done intentionally to avoid ArrayConcat() builtin performance
-  // degradation.
-  virtual void CopyElements(
-      JSObject* source_holder,
-      uint32_t source_start,
-      ElementsKind source_kind,
-      Handle<FixedArrayBase> destination,
-      uint32_t destination_start,
-      int copy_size) = 0;
-
-  inline void CopyElements(
-      Handle<JSObject> from_holder,
-      Handle<FixedArrayBase> to,
-      ElementsKind from_kind) {
-    CopyElements(
-      *from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
-  }
-
   // Copy all indices that have elements from |object| into the given
   // KeyAccumulator. For Dictionary-based element-kinds we filter out elements
   // whose PropertyAttribute match |filter|.
@@ -142,8 +110,7 @@
   static void InitializeOncePerProcess();
   static void TearDown();
 
-  virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
-                   Object* value) = 0;
+  virtual void Set(Handle<JSObject> holder, uint32_t entry, Object* value) = 0;
 
   virtual void Reconfigure(Handle<JSObject> object,
                            Handle<FixedArrayBase> backing_store, uint32_t entry,
@@ -183,9 +150,6 @@
  protected:
   friend class LookupIterator;
 
-  static ElementsAccessor* ForArray(FixedArrayBase* array);
-
-
   // Element handlers distinguish between entries and indices when they
   // manipulate elements. Entries refer to elements in terms of their location
   // in the underlying storage's backing store representation, and are between 0
@@ -197,8 +161,15 @@
   virtual uint32_t GetEntryForIndex(JSObject* holder,
                                     FixedArrayBase* backing_store,
                                     uint32_t index) = 0;
-  virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
-                                     uint32_t entry) = 0;
+
+  // NOTE: this method violates the handlified function signature convention:
+  // raw pointer parameter |source_holder| in the function that allocates.
+  // This is done intentionally to avoid ArrayConcat() builtin performance
+  // degradation.
+  virtual void CopyElements(JSObject* source_holder, uint32_t source_start,
+                            ElementsKind source_kind,
+                            Handle<FixedArrayBase> destination,
+                            uint32_t destination_start, int copy_size) = 0;
 
  private:
   virtual uint32_t GetCapacity(JSObject* holder,
diff --git a/src/execution.cc b/src/execution.cc
index d4efb76..e6a569f 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -144,8 +144,8 @@
       if (receiver->IsUndefined() || receiver->IsNull()) {
         receiver = handle(function->global_proxy(), isolate);
       } else {
-        ASSIGN_RETURN_ON_EXCEPTION(
-            isolate, receiver, Execution::ToObject(isolate, receiver), Object);
+        ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+                                   Object::ToObject(isolate, receiver), Object);
       }
     }
     DCHECK(function->context()->global_object()->IsJSGlobalObject());
@@ -421,18 +421,6 @@
 // --- C a l l s   t o   n a t i v e s ---
 
 
-MaybeHandle<JSReceiver> Execution::ToObject(Isolate* isolate,
-                                            Handle<Object> obj) {
-  Handle<JSReceiver> receiver;
-  if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
-    return receiver;
-  }
-  THROW_NEW_ERROR(isolate,
-                  NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
-                  JSReceiver);
-}
-
-
 Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
                                             Handle<JSFunction> fun,
                                             Handle<Object> pos,
@@ -492,7 +480,7 @@
 
   isolate_->counters()->stack_interrupts()->Increment();
   isolate_->counters()->runtime_profiler_ticks()->Increment();
-  isolate_->runtime_profiler()->OptimizeNow();
+  isolate_->runtime_profiler()->MarkCandidatesForOptimization();
 
   return isolate_->heap()->undefined_value();
 }
diff --git a/src/execution.h b/src/execution.h
index 81b71b6..52c7628 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -49,10 +49,6 @@
                                      Handle<Object> argv[],
                                      MaybeHandle<Object>* exception_out = NULL);
 
-  // ECMA-262 9.9
-  MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
-                                                          Handle<Object> obj);
-
   static Handle<String> GetStackTraceLine(Handle<Object> recv,
                                           Handle<JSFunction> fun,
                                           Handle<Object> pos,
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index 9241e9f..2ed3ad2 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -36,10 +36,10 @@
 typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
     SimpleTwoByteStringResource;
 
-
 const char* const ExternalizeStringExtension::kSource =
     "native function externalizeString();"
-    "native function isOneByteString();";
+    "native function isOneByteString();"
+    "function x() { return 1; }";
 
 v8::Local<v8::FunctionTemplate>
 ExternalizeStringExtension::GetNativeFunctionTemplate(
diff --git a/src/factory.cc b/src/factory.cc
index f03e6b2..15ddb5f 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -211,13 +211,6 @@
 }
 
 
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeString(Handle<String> string) {
-  if (string->IsInternalizedString()) return string;
-  return StringTable::LookupString(isolate(), string);
-}
-
-
 Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
   OneByteStringKey key(string, isolate()->heap()->HashSeed());
   return InternalizeStringWithKey(&key);
@@ -243,12 +236,6 @@
 }
 
 
-Handle<Name> Factory::InternalizeName(Handle<Name> name) {
-  if (name->IsUniqueName()) return name;
-  return InternalizeString(Handle<String>::cast(name));
-}
-
-
 MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
                                                   PretenureFlag pretenure) {
   int length = string.length();
@@ -868,10 +855,9 @@
 }
 
 
-Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
-  Handle<ExecutableAccessorInfo> info =
-      Handle<ExecutableAccessorInfo>::cast(
-          NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
+Handle<AccessorInfo> Factory::NewAccessorInfo() {
+  Handle<AccessorInfo> info =
+      Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
   info->set_flag(0);  // Must clear the flag, it was initialized as undefined.
   return info;
 }
@@ -1042,6 +1028,13 @@
                      FixedArray);
 }
 
+Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
+                                               int new_len,
+                                               PretenureFlag pretenure) {
+  CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFixedArrayUpTo(
+                                    *array, new_len, pretenure),
+                     FixedArray);
+}
 
 Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -1488,6 +1481,12 @@
                      Code);
 }
 
+Handle<BytecodeArray> Factory::CopyBytecodeArray(
+    Handle<BytecodeArray> bytecode_array) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->CopyBytecodeArray(*bytecode_array),
+                     BytecodeArray);
+}
 
 Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
                                       PretenureFlag pretenure) {
@@ -1738,16 +1737,6 @@
 }
 
 
-Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
-                                                      Handle<Object> done) {
-  Handle<JSIteratorResult> result = Handle<JSIteratorResult>::cast(
-      NewJSObjectFromMap(isolate()->iterator_result_map()));
-  result->set_value(*value);
-  result->set_done(*done);
-  return result;
-}
-
-
 namespace {
 
 ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
@@ -1967,9 +1956,9 @@
 
   // Determine the prototype of the {target_function}.
   Handle<Object> prototype;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate(), prototype,
-                             Object::GetPrototype(isolate(), target_function),
-                             JSBoundFunction);
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate(), prototype,
+      JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
 
   // Create the [[BoundArguments]] for the result.
   Handle<FixedArray> bound_arguments;
@@ -2001,7 +1990,6 @@
   result->set_bound_target_function(*target_function);
   result->set_bound_this(*bound_this);
   result->set_bound_arguments(*bound_arguments);
-  result->set_creation_context(*isolate()->native_context());
   result->set_length(Smi::FromInt(0));
   result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
   return result;
@@ -2124,6 +2112,10 @@
 
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name, MaybeHandle<Code> maybe_code, bool is_constructor) {
+  // Function names are assumed to be flat elsewhere. Must flatten before
+  // allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
+  name = String::Flatten(name, TENURED);
+
   Handle<Map> map = shared_function_info_map();
   Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
 
@@ -2143,7 +2135,7 @@
   share->set_instance_class_name(*Object_string());
   share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+  share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
   share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
   StaticFeedbackVectorSpec empty_spec;
   Handle<TypeFeedbackMetadata> feedback_metadata =
@@ -2261,7 +2253,14 @@
   Handle<DebugInfo> debug_info =
       Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
   debug_info->set_shared(*shared);
-  debug_info->set_code(shared->code());
+  if (shared->HasBytecodeArray()) {
+    // Create a copy for debugging.
+    Handle<BytecodeArray> original(shared->bytecode_array(), isolate());
+    Handle<BytecodeArray> copy = CopyBytecodeArray(original);
+    debug_info->set_abstract_code(AbstractCode::cast(*copy));
+  } else {
+    debug_info->set_abstract_code(AbstractCode::cast(shared->code()));
+  }
   debug_info->set_break_points(*break_points);
 
   // Link debug info to function.
diff --git a/src/factory.h b/src/factory.h
index 01a2f7e..dd107d1 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -70,7 +70,7 @@
   Handle<String> InternalizeUtf8String(const char* str) {
     return InternalizeUtf8String(CStrVector(str));
   }
-  Handle<String> InternalizeString(Handle<String> str);
+
   Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
   Handle<String> InternalizeOneByteString(
       Handle<SeqOneByteString>, int from, int length);
@@ -80,8 +80,16 @@
   template<class StringTableKey>
   Handle<String> InternalizeStringWithKey(StringTableKey* key);
 
-  Handle<Name> InternalizeName(Handle<Name> name);
+  // Internalized strings are created in the old generation (data space).
+  Handle<String> InternalizeString(Handle<String> string) {
+    if (string->IsInternalizedString()) return string;
+    return StringTable::LookupString(isolate(), string);
+  }
 
+  Handle<Name> InternalizeName(Handle<Name> name) {
+    if (name->IsUniqueName()) return name;
+    return StringTable::LookupString(isolate(), Handle<String>::cast(name));
+  }
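
Moving these bodies into the header exposes the fast path inline: names that are already unique are returned unchanged, and everything else goes through the string table so equal strings end up sharing one canonical object. A minimal standalone sketch of the same interning pattern (assuming a simple hash-set table, not V8's StringTable):

    #include <iostream>
    #include <string>
    #include <unordered_set>

    // One canonical copy per distinct value; element pointers stay valid
    // across rehashes of an unordered_set.
    const std::string* Internalize(const std::string& s) {
      static std::unordered_set<std::string> table;
      return &*table.insert(s).first;
    }

    int main() {
      const std::string* a = Internalize("foo");
      const std::string* b = Internalize(std::string("f") + "oo");
      std::cout << (a == b) << std::endl;  // 1: equal strings share one object
    }
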
 
   // String creation functions.  Most of the string creation functions take
   // a Heap::PretenureFlag argument to optionally request that they be
@@ -262,7 +270,7 @@
   Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
       int aliased_context_slot);
 
-  Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
+  Handle<AccessorInfo> NewAccessorInfo();
 
   Handle<Script> NewScript(Handle<String> source);
 
@@ -323,6 +331,9 @@
       Handle<FixedArray> array, int grow_by,
       PretenureFlag pretenure = NOT_TENURED);
 
+  Handle<FixedArray> CopyFixedArrayUpTo(Handle<FixedArray> array, int new_len,
+                                        PretenureFlag pretenure = NOT_TENURED);
+
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
   // This method expects a COW array in new space, and creates a copy
@@ -395,11 +406,6 @@
 
   // JS arrays are pretenured when allocated by the parser.
 
-  // Create a JSArray with no elements.
-  Handle<JSArray> NewJSArray(ElementsKind elements_kind,
-                             Strength strength = Strength::WEAK,
-                             PretenureFlag pretenure = NOT_TENURED);
-
   // Create a JSArray with a specified length and elements initialized
   // according to the specified mode.
   Handle<JSArray> NewJSArray(
@@ -474,11 +480,6 @@
   Handle<JSMapIterator> NewJSMapIterator();
   Handle<JSSetIterator> NewJSSetIterator();
 
-  // Creates a new JSIteratorResult object with the arguments {value} and
-  // {done}.  Implemented according to ES6 section 7.4.7 CreateIterResultObject.
-  Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value,
-                                               Handle<Object> done);
-
   // Allocates a bound function.
   MaybeHandle<JSBoundFunction> NewJSBoundFunction(
       Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -548,6 +549,8 @@
 
   Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
 
+  Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
+
   // Interface for creating error objects.
   Handle<Object> NewError(Handle<JSFunction> constructor,
                           Handle<String> message);
@@ -704,6 +707,11 @@
                                  Handle<SharedFunctionInfo> info,
                                  Handle<Context> context,
                                  PretenureFlag pretenure = TENURED);
+
+  // Create a JSArray with no elements and no length.
+  Handle<JSArray> NewJSArray(ElementsKind elements_kind,
+                             Strength strength = Strength::WEAK,
+                             PretenureFlag pretenure = NOT_TENURED);
 };
 
 }  // namespace internal
diff --git a/src/field-type.cc b/src/field-type.cc
new file mode 100644
index 0000000..76d694c
--- /dev/null
+++ b/src/field-type.cc
@@ -0,0 +1,91 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/field-type.h"
+
+#include "src/handles-inl.h"
+#include "src/ostreams.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+FieldType* FieldType::None() {
+  return reinterpret_cast<FieldType*>(Smi::FromInt(0));
+}
+
+// static
+FieldType* FieldType::Any() {
+  return reinterpret_cast<FieldType*>(Smi::FromInt(1));
+}
+
+// static
+Handle<FieldType> FieldType::None(Isolate* isolate) {
+  return handle(None(), isolate);
+}
+
+// static
+Handle<FieldType> FieldType::Any(Isolate* isolate) {
+  return handle(Any(), isolate);
+}
+
+// static
+FieldType* FieldType::Class(i::Map* map) { return FieldType::cast(map); }
+
+// static
+Handle<FieldType> FieldType::Class(i::Handle<i::Map> map, Isolate* isolate) {
+  return handle(Class(*map), isolate);
+}
+
+// static
+FieldType* FieldType::cast(Object* object) {
+  DCHECK(object == None() || object == Any() || object->IsMap());
+  return reinterpret_cast<FieldType*>(object);
+}
+
+bool FieldType::IsClass() { return this->IsMap(); }
+
+Handle<i::Map> FieldType::AsClass() {
+  DCHECK(IsClass());
+  i::Map* map = Map::cast(this);
+  return handle(map, map->GetIsolate());
+}
+
+bool FieldType::NowStable() {
+  return !this->IsClass() || this->AsClass()->is_stable();
+}
+
+bool FieldType::NowIs(FieldType* other) {
+  if (other->IsAny()) return true;
+  if (IsNone()) return true;
+  if (other->IsNone()) return false;
+  if (IsAny()) return false;
+  DCHECK(IsClass());
+  DCHECK(other->IsClass());
+  return this == other;
+}
+
+bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
+
+Type* FieldType::Convert(Zone* zone) {
+  if (IsAny()) return Type::Any();
+  if (IsNone()) return Type::None();
+  DCHECK(IsClass());
+  return Type::Class(AsClass(), zone);
+}
+
+void FieldType::PrintTo(std::ostream& os) {
+  if (IsAny()) {
+    os << "Any";
+  } else if (IsNone()) {
+    os << "None";
+  } else {
+    DCHECK(IsClass());
+    os << "Class(" << static_cast<void*>(*AsClass()) << ")";
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
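
FieldType packs its lattice endpoints into the pointer itself: None and Any are the Smis 0 and 1 reinterpreted as FieldType*, and any genuine Map pointer means Class. A standalone model of the sentinel encoding and the NowIs lattice (illustrative assumptions: Smi tagging shifts the value left by one with a clear low bit, and classes compare by identity):

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    struct FieldTypeModel {};

    // Sentinel "pointers" are small tagged integers; any other value is
    // treated as a class pointer.
    FieldTypeModel* None() {
      return reinterpret_cast<FieldTypeModel*>(std::uintptr_t{0 << 1});
    }
    FieldTypeModel* Any() {
      return reinterpret_cast<FieldTypeModel*>(std::uintptr_t{1 << 1});
    }

    bool IsNone(FieldTypeModel* t) { return t == None(); }
    bool IsAny(FieldTypeModel* t) { return t == Any(); }

    // NowIs: the lattice None <= Class(m) <= Any, classes by identity.
    bool NowIs(FieldTypeModel* self, FieldTypeModel* other) {
      if (IsAny(other)) return true;
      if (IsNone(self)) return true;
      if (IsNone(other)) return false;
      if (IsAny(self)) return false;
      return self == other;  // two classes: only identical maps match
    }

    int main() {
      FieldTypeModel some_map;  // stands in for a concrete Map*
      assert(NowIs(None(), &some_map));     // None is below everything
      assert(NowIs(&some_map, Any()));      // Any is above everything
      assert(!NowIs(Any(), &some_map));     // Any is not a specific class
      assert(NowIs(&some_map, &some_map));  // class identity
      std::cout << "ok" << std::endl;
    }
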
diff --git a/src/field-type.h b/src/field-type.h
new file mode 100644
index 0000000..eb7ffca
--- /dev/null
+++ b/src/field-type.h
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIELD_TYPE_H_
+#define V8_FIELD_TYPE_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+class FieldType : public Object {
+ public:
+  static FieldType* None();
+  static FieldType* Any();
+  static Handle<FieldType> None(Isolate* isolate);
+  static Handle<FieldType> Any(Isolate* isolate);
+  static FieldType* Class(i::Map* map);
+  static Handle<FieldType> Class(i::Handle<i::Map> map, Isolate* isolate);
+  static FieldType* cast(Object* object);
+
+  bool NowContains(Object* value) {
+    if (this == Any()) return true;
+    if (this == None()) return false;
+    if (!value->IsHeapObject()) return false;
+    return HeapObject::cast(value)->map() == Map::cast(this);
+  }
+
+  bool NowContains(Handle<Object> value) { return NowContains(*value); }
+
+  bool IsClass();
+  Handle<i::Map> AsClass();
+  bool IsNone() { return this == None(); }
+  bool IsAny() { return this == Any(); }
+  bool NowStable();
+  bool NowIs(FieldType* other);
+  bool NowIs(Handle<FieldType> other);
+  Type* Convert(Zone* zone);
+
+  void PrintTo(std::ostream& os);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_FIELD_TYPE_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 9966a70..ac430ab 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -144,6 +144,11 @@
 #else
 # define ENABLE_NEON_DEFAULT false
 #endif
+#ifdef V8_OS_WIN
+# define ENABLE_LOG_COLOUR false
+#else
+# define ENABLE_LOG_COLOUR true
+#endif
 
 #define DEFINE_BOOL(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
 #define DEFINE_BOOL_READONLY(nam, def, cmt) \
@@ -196,39 +201,44 @@
 // Activate on ClusterFuzz.
 DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
 DEFINE_IMPLICATION(es_staging, move_object_start)
+DEFINE_IMPLICATION(es_staging, harmony_tailcalls)
 
 // Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V)                                 \
-  V(harmony_modules, "harmony modules")                       \
-  V(harmony_unicode_regexps, "harmony unicode regexps")       \
-  V(harmony_function_name, "harmony Function name inference") \
-  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")   \
-  V(harmony_simd, "harmony simd")                             \
-  V(harmony_do_expressions, "harmony do-expressions")         \
-  V(harmony_regexp_subclass, "harmony regexp subclassing")    \
-  V(harmony_species, "harmony Symbol.species")
+#define HARMONY_INPROGRESS(V)                                                \
+  V(harmony_object_observe, "harmony Object.observe")                        \
+  V(harmony_modules, "harmony modules")                                      \
+  V(harmony_function_sent, "harmony function.sent")                          \
+  V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")                  \
+  V(harmony_simd, "harmony simd")                                            \
+  V(harmony_do_expressions, "harmony do-expressions")                        \
+  V(harmony_iterator_close, "harmony iterator finalization")                 \
+  V(harmony_tailcalls, "harmony tail calls")                                 \
+  V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
+  V(harmony_object_own_property_descriptors,                                 \
+    "harmony Object.getOwnPropertyDescriptors()")                            \
+  V(harmony_regexp_property, "harmony unicode regexp property classes")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V)                                                 \
-  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")
+#define HARMONY_STAGED(V)                                     \
+  V(harmony_function_name, "harmony Function name inference") \
+  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")   \
+  V(harmony_species, "harmony Symbol.species")                \
+  V(harmony_instanceof, "harmony instanceof support")
 
 // Features that are shipping (turned on by default, but internal flag remains).
 #define HARMONY_SHIPPING(V)                                               \
   V(harmony_default_parameters, "harmony default parameters")             \
   V(harmony_destructuring_assignment, "harmony destructuring assignment") \
   V(harmony_destructuring_bind, "harmony destructuring bind")             \
-  V(harmony_concat_spreadable, "harmony isConcatSpreadable")              \
-  V(harmony_object_observe, "harmony Object.observe")                     \
-  V(harmony_tolength, "harmony ToLength")                                 \
   V(harmony_tostring, "harmony toString")                                 \
-  V(harmony_completion, "harmony completion value semantics")             \
   V(harmony_regexps, "harmony regular expression extensions")             \
+  V(harmony_unicode_regexps, "harmony unicode regexps")                   \
   V(harmony_sloppy, "harmony features in sloppy mode")                    \
   V(harmony_sloppy_let, "harmony let in sloppy mode")                     \
   V(harmony_sloppy_function, "harmony sloppy function block scoping")     \
   V(harmony_proxies, "harmony proxies")                                   \
-  V(harmony_reflect, "harmony Reflect API")
-
+  V(harmony_reflect, "harmony Reflect API")                               \
+  V(harmony_regexp_subclass, "harmony regexp subclassing")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -297,13 +307,10 @@
 // Flags for Ignition.
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
 DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
-DEFINE_BOOL(ignition_fake_try_catch, false,
-            "enable fake try-catch-finally blocks in ignition for testing")
-DEFINE_BOOL(ignition_fallback_on_eval_and_catch, false,
-            "fallback to full-codegen for functions which contain eval, catch"
-            "and es6 blocks")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
+DEFINE_BOOL(trace_ignition, false,
+            "trace the bytecodes executed by the ignition interpreter")
 DEFINE_BOOL(trace_ignition_codegen, false,
             "trace the codegen of ignition interpreter bytecode handlers")
 
@@ -328,7 +335,7 @@
            "maximum cumulative number of AST nodes considered for inlining")
 DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion")
 DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, true,
+DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, false,
             "crankshaft harvests type feedback from stub cache")
 DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
 DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
@@ -395,8 +402,6 @@
 DEFINE_INT(escape_analysis_iterations, 2,
            "maximum number of escape analysis fix-point iterations")
 
-DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")
-
 DEFINE_BOOL(concurrent_recompilation, true,
             "optimizing hot functions asynchronously on a separate thread")
 DEFINE_BOOL(trace_concurrent_recompilation, false,
@@ -417,7 +422,6 @@
 // Flags for TurboFan.
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
-DEFINE_IMPLICATION(turbo, turbo_inlining)
 DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
 DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
 DEFINE_BOOL(turbo_sp_frame_access, false,
@@ -450,7 +454,7 @@
             "enable function context specialization in TurboFan")
 DEFINE_BOOL(native_context_specialization, true,
             "enable native context specialization in TurboFan")
-DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
+DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
 DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
 DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
 DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
@@ -459,7 +463,6 @@
 DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
 DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
 DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
-DEFINE_BOOL(turbo_try_finally, false, "enable try-finally support in TurboFan")
 DEFINE_BOOL(turbo_stress_loop_peeling, false,
             "stress loop peeling optimization")
 DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
@@ -467,18 +470,26 @@
 DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
 DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
 DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
-DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
 DEFINE_BOOL(turbo_instruction_scheduling, false,
             "enable instruction scheduling in TurboFan")
+DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
+            "randomly schedule instructions to stress dependency tracking")
 
 // Flags for native WebAssembly.
 DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
 DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
 DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
 DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_BOOL(trace_wasm_ast, false, "dump AST after WASM decode")
 DEFINE_BOOL(wasm_break_on_decoder_error, false,
             "debug break when wasm decoder encounters an error")
 
+DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
+
+DEFINE_BOOL(dump_asmjs_wasm, false, "dump Asm.js to WASM module bytes")
+DEFINE_STRING(asmjs_wasm_dumpfile, "asmjs.wasm",
+              "file to dump asm wasm conversion result to")
+
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
 
@@ -626,13 +637,14 @@
 // full-codegen.cc
 DEFINE_BOOL(always_inline_smi_code, false,
             "always inline smi code in non-opt code")
+DEFINE_BOOL(verify_operand_stack_depth, false,
+            "emit debug code that verifies the static tracking of the operand "
+            "stack depth")
 
 // heap.cc
 DEFINE_INT(min_semi_space_size, 0,
            "min size of a semi-space (in MBytes), the new space consists of two"
            "semi-spaces")
-DEFINE_INT(target_semi_space_size, 0,
-           "target size of a semi-space (in MBytes) before triggering a GC")
 DEFINE_INT(max_semi_space_size, 0,
            "max size of a semi-space (in MBytes), the new space consists of two"
            "semi-spaces")
@@ -721,6 +733,11 @@
             "Dump heap object allocations/movements/size_updates")
 
 
+// sampling-heap-profiler.cc
+DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
+            "Use constant sample intervals to eliminate test flakiness")
+
+
 // v8.cc
 DEFINE_BOOL(use_idle_notification, true,
             "Use idle notification to reduce memory footprint.")
@@ -787,7 +804,8 @@
            "in kBytes (default is 2 MB)")
 DEFINE_BOOL(log_regs_modified, true,
             "When logging register values, only print modified registers.")
-DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.")
+DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
+            "When logging, try to use coloured output.")
 DEFINE_BOOL(ignore_asm_unimplemented_break, false,
             "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
 DEFINE_BOOL(trace_sim_messages, false,
@@ -805,6 +823,9 @@
            "Fixed seed to use to hash property keys (0 means random)"
            "(with snapshots this option cannot override the baked-in seed)")
 
+// runtime.cc
+DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
+
 // snapshot-common.cc
 DEFINE_BOOL(profile_deserialization, false,
             "Print the time it takes to deserialize the snapshot.")
@@ -864,6 +885,10 @@
            "Time spent in incremental marking steps (in ms) once the external "
            "allocation limit is reached")
 
+DEFINE_BOOL(disable_old_api_accessors, false,
+            "Disable old-style API accessors whose setters trigger through the "
+            "prototype chain")
+
 //
 // Dev shell flags
 //
@@ -919,7 +944,6 @@
             "pretty print source code for builtins")
 DEFINE_BOOL(print_ast, false, "print source AST")
 DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_STRING(stop_at, "", "function name where to insert a breakpoint")
 DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints")
 
 // compiler.cc
@@ -941,6 +965,7 @@
 // TurboFan debug-only flags.
 DEFINE_BOOL(print_turbo_replay, false,
             "print C++ code to recreate TurboFan graphs")
+DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
 
 // objects.cc
 DEFINE_BOOL(trace_normalization, false,
@@ -953,6 +978,8 @@
 DEFINE_BOOL(collect_heap_spill_statistics, false,
             "report heap spill statistics along with heap_stats "
             "(requires heap_stats)")
+DEFINE_BOOL(trace_live_bytes, false,
+            "trace incrementing and resetting of live bytes")
 
 DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
 
diff --git a/src/frames.cc b/src/frames.cc
index d60ab29..50a2e21 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -121,15 +121,15 @@
 
 StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
 #define FRAME_TYPE_CASE(type, field) \
-  case StackFrame::type: result = &field##_; break;
+  case StackFrame::type:             \
+    return &field##_;
 
-  StackFrame* result = NULL;
   switch (type) {
     case StackFrame::NONE: return NULL;
     STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
     default: break;
   }
-  return result;
+  return NULL;
 
 #undef FRAME_TYPE_CASE
 }
@@ -234,17 +234,8 @@
   }
   if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
-  if (frame_ == NULL) return;
-
+  DCHECK(frame_);
   Advance();
-
-  if (frame_ != NULL && !frame_->is_exit() &&
-      external_callback_scope_ != NULL &&
-      external_callback_scope_->scope_address() < frame_->fp()) {
-    // Skip top ExternalCallbackScope if we already advanced to a JS frame
-    // under it. Sampler will anyways take this top external callback.
-    external_callback_scope_ = external_callback_scope_->previous();
-  }
 }
 
 
@@ -272,8 +263,12 @@
   // Advance to the previous frame.
   StackFrame::State state;
   StackFrame::Type type = frame_->GetCallerState(&state);
+  if (SingletonFor(type) == NULL) {
+    frame_ = NULL;
+    return;
+  }
   frame_ = SingletonFor(type, &state);
-  if (frame_ == NULL) return;
+  DCHECK(frame_);
 
   // Check that we have actually moved to the previous frame in the stack.
   if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
@@ -325,22 +320,30 @@
 void SafeStackFrameIterator::Advance() {
   while (true) {
     AdvanceOneFrame();
-    if (done()) return;
-    if (frame_->is_java_script()) return;
-    if (frame_->is_exit() && external_callback_scope_) {
+    if (done()) break;
+    ExternalCallbackScope* last_callback_scope = NULL;
+    while (external_callback_scope_ != NULL &&
+           external_callback_scope_->scope_address() < frame_->fp()) {
+      // Because the setup of a frame is not atomic, we may observe an
+      // interval in which an ExternalCallbackScope has already been created
+      // but its frame has not yet been entered; in that case we are actually
+      // observing the previous frame.
+      // Skip all ExternalCallbackScopes that lie below the current fp.
+      last_callback_scope = external_callback_scope_;
+      external_callback_scope_ = external_callback_scope_->previous();
+    }
+    if (frame_->is_java_script()) break;
+    if (frame_->is_exit()) {
       // Some of the EXIT frames may have ExternalCallbackScope allocated on
       // top of them. In that case the scope corresponds to the first EXIT
       // frame beneath it. There may be other EXIT frames on top of the
       // ExternalCallbackScope, just skip them as we cannot collect any useful
       // information about them.
-      if (external_callback_scope_->scope_address() < frame_->fp()) {
+      if (last_callback_scope) {
         frame_->state_.pc_address =
-            external_callback_scope_->callback_entrypoint_address();
-        external_callback_scope_ = external_callback_scope_->previous();
-        DCHECK(external_callback_scope_ == NULL ||
-               external_callback_scope_->scope_address() > frame_->fp());
-        return;
+            last_callback_scope->callback_entrypoint_address();
       }
+      break;
     }
   }
 }
@@ -411,6 +414,12 @@
     // the VM with a signal at any arbitrary instruction, with essentially
     // anything on the stack. So basically none of these checks are 100%
     // reliable.
+#if defined(USE_SIMULATOR)
+    MSAN_MEMORY_IS_INITIALIZED(
+        state->fp + StandardFrameConstants::kContextOffset, kPointerSize);
+    MSAN_MEMORY_IS_INITIALIZED(
+        state->fp + StandardFrameConstants::kMarkerOffset, kPointerSize);
+#endif
     if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
       // An adapter frame has a special SMI constant for the context and
       // is not distinguished through the marker.
@@ -446,7 +455,8 @@
             return ARGUMENTS_ADAPTOR;
           } else {
             // The interpreter entry trampoline has a non-SMI marker.
-            DCHECK(code_obj->is_interpreter_entry_trampoline());
+            DCHECK(code_obj->is_interpreter_entry_trampoline() ||
+                   code_obj->is_interpreter_enter_bytecode_dispatch());
             return INTERPRETED;
           }
         }
@@ -598,23 +608,14 @@
   return fp() + offset - n * kPointerSize;
 }
 
-
-Object* StandardFrame::GetExpression(Address fp, int index) {
-  return Memory::Object_at(GetExpressionAddress(fp, index));
+Address InterpretedFrame::GetExpressionAddress(int n) const {
+  const int offset = InterpreterFrameConstants::kExpressionsOffset;
+  return fp() + offset - n * kPointerSize;
 }
 
-
-Address StandardFrame::GetExpressionAddress(Address fp, int n) {
-  const int offset = StandardFrameConstants::kExpressionsOffset;
-  return fp + offset - n * kPointerSize;
-}
-
-
 int StandardFrame::ComputeExpressionsCount() const {
-  const int offset =
-      StandardFrameConstants::kExpressionsOffset + kPointerSize;
-  Address base = fp() + offset;
-  Address limit = sp();
+  Address base = GetExpressionAddress(0);
+  Address limit = sp() - kPointerSize;
   DCHECK(base >= limit);  // stack grows downwards
   // Include register-allocated locals in number of expressions.
   return static_cast<int>((base - limit) / kPointerSize);
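
With a downward-growing stack, the expression count is the distance from the address of expression 0 down to one slot past sp, measured in pointer-sized units. A worked example with made-up addresses (assuming a 64-bit target):

    #include <cstdint>
    #include <iostream>

    int main() {
      const std::uintptr_t kPointerSize = 8;
      std::uintptr_t base = 0x1000;  // address of expression slot 0
      std::uintptr_t sp = 0x0FF0;    // top of stack: two slots below base
      std::uintptr_t limit = sp - kPointerSize;
      int count = static_cast<int>((base - limit) / kPointerSize);
      std::cout << count << std::endl;  // 3: slots at 0x1000, 0x0FF8, 0x0FF0
    }
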
@@ -647,7 +648,8 @@
   SafepointEntry safepoint_entry;
   Code* code = StackFrame::GetSafepointData(
       isolate(), pc(), &safepoint_entry, &stack_slots);
-  unsigned slot_space = stack_slots * kPointerSize;
+  unsigned slot_space =
+      stack_slots * kPointerSize - StandardFrameConstants::kFixedFrameSize;
 
   // Visit the outgoing parameters.
   Object** parameters_base = &Memory::Object_at(sp());
@@ -761,9 +763,7 @@
 int JavaScriptFrame::GetArgumentsLength() const {
   // If there is an arguments adaptor frame get the arguments length from it.
   if (has_adapted_arguments()) {
-    STATIC_ASSERT(ArgumentsAdaptorFrameConstants::kLengthOffset ==
-                  StandardFrameConstants::kExpressionsOffset);
-    return Smi::cast(GetExpression(caller_fp(), 0))->value();
+    return ArgumentsAdaptorFrame::GetLength(caller_fp());
   } else {
     return GetNumberOfIncomingArguments();
   }
@@ -796,24 +796,21 @@
 
 void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
   DCHECK(functions->length() == 0);
-  Code* code_pointer = LookupCode();
-  int offset = static_cast<int>(pc() - code_pointer->address());
-  FrameSummary summary(receiver(),
-                       function(),
-                       code_pointer,
-                       offset,
+  Code* code = LookupCode();
+  int offset = static_cast<int>(pc() - code->instruction_start());
+  AbstractCode* abstract_code = AbstractCode::cast(code);
+  FrameSummary summary(receiver(), function(), abstract_code, offset,
                        IsConstructor());
   functions->Add(summary);
 }
 
-
 int JavaScriptFrame::LookupExceptionHandlerInTable(
-    int* stack_slots, HandlerTable::CatchPrediction* prediction) {
+    int* stack_depth, HandlerTable::CatchPrediction* prediction) {
   Code* code = LookupCode();
   DCHECK(!code->is_optimized_code());
   HandlerTable* table = HandlerTable::cast(code->handler_table());
   int pc_offset = static_cast<int>(pc() - code->entry());
-  return table->LookupRange(pc_offset, stack_slots, prediction);
+  return table->LookupRange(pc_offset, stack_depth, prediction);
 }
 
 
@@ -826,7 +823,7 @@
   PrintF(file, "+%d", code_offset);
   if (print_line_number) {
     SharedFunctionInfo* shared = function->shared();
-    int source_pos = code->SourcePosition(pc);
+    int source_pos = code->SourcePosition(code_offset);
     Object* maybe_script = shared->script();
     if (maybe_script->IsScript()) {
       Script* script = Script::cast(maybe_script);
@@ -896,26 +893,30 @@
   }
 }
 
-
-FrameSummary::FrameSummary(Object* receiver, JSFunction* function, Code* code,
-                           int offset, bool is_constructor)
+FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
+                           AbstractCode* abstract_code, int code_offset,
+                           bool is_constructor)
     : receiver_(receiver, function->GetIsolate()),
       function_(function),
-      code_(code),
-      offset_(offset),
+      abstract_code_(abstract_code),
+      code_offset_(code_offset),
       is_constructor_(is_constructor) {}
 
-
 void FrameSummary::Print() {
   PrintF("receiver: ");
   receiver_->ShortPrint();
   PrintF("\nfunction: ");
   function_->shared()->DebugName()->ShortPrint();
   PrintF("\ncode: ");
-  code_->ShortPrint();
-  if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
-  if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
-  PrintF("\npc: %d\n", offset_);
+  abstract_code_->ShortPrint();
+  if (abstract_code_->IsCode()) {
+    Code* code = abstract_code_->GetCode();
+    if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
+    if (code->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT ");
+  } else {
+    PrintF(" BYTECODE ");
+  }
+  PrintF("\npc: %d\n", code_offset_);
 }
 
 
@@ -964,11 +965,9 @@
       JSFunction* function;
       if (opcode == Translation::LITERAL) {
         function = JSFunction::cast(literal_array->get(it.Next()));
-      } else if (opcode == Translation::STACK_SLOT) {
-        function = JSFunction::cast(StackSlotAt(it.Next()));
       } else {
-        CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
-        function = this->function();
+        CHECK_EQ(opcode, Translation::STACK_SLOT);
+        function = JSFunction::cast(StackSlotAt(it.Next()));
       }
       DCHECK_EQ(shared_info, function->shared());
 
@@ -982,8 +981,6 @@
         receiver = literal_array->get(it.Next());
       } else if (opcode == Translation::STACK_SLOT) {
         receiver = StackSlotAt(it.Next());
-      } else if (opcode == Translation::JS_FRAME_FUNCTION) {
-        receiver = this->function();
       } else {
         // The receiver is neither in a stack slot nor in a literal. We give up.
         it.Skip(Translation::NumberOfOperandsFor(opcode));
@@ -994,24 +991,26 @@
         receiver = isolate()->heap()->undefined_value();
       }
 
-      Code* const code = shared_info->code();
+      AbstractCode* abstract_code;
 
-      unsigned pc_offset;
+      unsigned code_offset;
       if (frame_opcode == Translation::JS_FRAME) {
+        Code* code = shared_info->code();
         DeoptimizationOutputData* const output_data =
             DeoptimizationOutputData::cast(code->deoptimization_data());
         unsigned const entry =
             Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
-        pc_offset =
-            FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
-        DCHECK_NE(0U, pc_offset);
+        code_offset = FullCodeGenerator::PcField::decode(entry);
+        abstract_code = AbstractCode::cast(code);
       } else {
         // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
         // based on the BytecodeArray and bytecode offset.
         DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
-        pc_offset = 0;
+        code_offset = 0;
+        abstract_code = AbstractCode::cast(shared_info->bytecode_array());
       }
-      FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
+      FrameSummary summary(receiver, function, abstract_code, code_offset,
+                           is_constructor);
       frames->Add(summary);
       is_constructor = false;
     } else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
@@ -1034,7 +1033,7 @@
   DCHECK(code->is_optimized_code());
   HandlerTable* table = HandlerTable::cast(code->handler_table());
   int pc_offset = static_cast<int>(pc() - code->entry());
-  *stack_slots = code->stack_slots();
+  if (stack_slots) *stack_slots = code->stack_slots();
   return table->LookupReturn(pc_offset, prediction);
 }
 
@@ -1105,11 +1104,9 @@
       Object* function;
       if (opcode == Translation::LITERAL) {
         function = literal_array->get(it.Next());
-      } else if (opcode == Translation::STACK_SLOT) {
-        function = StackSlotAt(it.Next());
       } else {
-        CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
-        function = this->function();
+        CHECK_EQ(Translation::STACK_SLOT, opcode);
+        function = StackSlotAt(it.Next());
       }
       functions->Add(JSFunction::cast(function));
     }
@@ -1127,6 +1124,64 @@
   return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
 }
 
+int InterpretedFrame::LookupExceptionHandlerInTable(
+    int* context_register, HandlerTable::CatchPrediction* prediction) {
+  BytecodeArray* bytecode = function()->shared()->bytecode_array();
+  HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+  int pc_offset = GetBytecodeOffset() + 1;  // Point after current bytecode.
+  return table->LookupRange(pc_offset, context_register, prediction);
+}
+
+int InterpretedFrame::GetBytecodeOffset() const {
+  const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kBytecodeOffsetFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  int raw_offset = Smi::cast(GetExpression(index))->value();
+  return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+}
+
+void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
+  const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kBytecodeOffsetFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  int raw_offset = new_offset + BytecodeArray::kHeaderSize - kHeapObjectTag;
+  SetExpression(index, Smi::FromInt(raw_offset));
+}
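
The frame slot holds the offset relative to the start of the heap-object-tagged BytecodeArray, so these two helpers convert in opposite directions by adding or removing the array header size and the tag. A hedged arithmetic check (kHeapObjectTag is 1 in V8; the header size here is an assumed stand-in):

    #include <iostream>

    int main() {
      const int kHeapObjectTag = 1;             // tag bit set on heap pointers
      const int kBytecodeArrayHeaderSize = 16;  // assumed header size
      int bytecode_offset = 7;                  // logical offset into bytecodes
      // PatchBytecodeOffset direction: store header-relative, untagged.
      int raw = bytecode_offset + kBytecodeArrayHeaderSize - kHeapObjectTag;
      // GetBytecodeOffset direction: recover the logical offset.
      int back = raw - kBytecodeArrayHeaderSize + kHeapObjectTag;
      std::cout << (back == bytecode_offset) << std::endl;  // 1
    }
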
+
+Object* InterpretedFrame::GetBytecodeArray() const {
+  const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kBytecodeArrayFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  return GetExpression(index);
+}
+
+void InterpretedFrame::PatchBytecodeArray(Object* bytecode_array) {
+  const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kBytecodeArrayFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  SetExpression(index, bytecode_array);
+}
+
+Object* InterpretedFrame::GetInterpreterRegister(int register_index) const {
+  const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
+  DCHECK_EQ(
+      InterpreterFrameConstants::kRegisterFilePointerFromFp,
+      InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+  return GetExpression(index + register_index);
+}
+
+void InterpretedFrame::Summarize(List<FrameSummary>* functions) {
+  DCHECK(functions->length() == 0);
+  AbstractCode* abstract_code =
+      AbstractCode::cast(function()->shared()->bytecode_array());
+  FrameSummary summary(receiver(), function(), abstract_code,
+                       GetBytecodeOffset(), IsConstructor());
+  functions->Add(summary);
+}
 
 int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
   return Smi::cast(GetExpression(0))->value();
@@ -1137,19 +1192,21 @@
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
-
-Address InternalFrame::GetCallerStackPointer() const {
-  // Internal frames have no arguments. The stack pointer of the
-  // caller is at a fixed offset from the frame pointer.
-  return fp() + StandardFrameConstants::kCallerSPOffset;
+int ArgumentsAdaptorFrame::GetLength(Address fp) {
+  const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
+  return Smi::cast(Memory::Object_at(fp + offset))->value();
 }
 
-
 Code* ArgumentsAdaptorFrame::unchecked_code() const {
   return isolate()->builtins()->builtin(
       Builtins::kArgumentsAdaptorTrampoline);
 }
 
+Address InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
 
 Code* InternalFrame::unchecked_code() const {
   const int offset = InternalFrameConstants::kCodeOffset;
@@ -1212,7 +1269,8 @@
     Address pc = this->pc();
     if (code != NULL && code->kind() == Code::FUNCTION &&
         pc >= code->instruction_start() && pc < code->instruction_end()) {
-      int source_pos = code->SourcePosition(pc);
+      int offset = static_cast<int>(pc - code->instruction_start());
+      int source_pos = code->SourcePosition(offset);
       int line = script->GetLineNumber(source_pos) + 1;
       accumulator->Add(":%d", line);
     } else {
@@ -1369,7 +1427,6 @@
   IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
 }
 
-
 void InternalFrame::Iterate(ObjectVisitor* v) const {
   // Internal frames only have object pointers on the expression stack
   // as they never have any arguments.
@@ -1467,10 +1524,6 @@
 Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
     Address inner_pointer) {
   Heap* heap = isolate_->heap();
-  if (!heap->code_space()->Contains(inner_pointer) &&
-      !heap->lo_space()->Contains(inner_pointer)) {
-    return nullptr;
-  }
 
   // Check if the inner pointer points into a large object chunk.
   LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
@@ -1478,6 +1531,10 @@
     return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
   }
 
+  if (!heap->code_space()->Contains(inner_pointer)) {
+    return nullptr;
+  }
+
   // Iterate through the page until we reach the end or find an object starting
   // after the inner pointer.
   Page* page = Page::FromAddress(inner_pointer);
diff --git a/src/frames.h b/src/frames.h
index 674d7da..f33eb16 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -111,6 +111,43 @@
   V(CONSTRUCT, ConstructFrame)                           \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
 
+// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
+// two slots.
+//
+// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
+// the callee's saved return address and 1 corresponding to the saved frame
+// pointer. Some frames have additional information stored in the fixed header,
+// for example JSFunctions store the function context and marker in the fixed
+// header, with slot index 2 corresponding to the current function context and 3
+// corresponding to the frame marker/JSFunction.
+//
+//  slot      JS frame
+//       +-----------------+--------------------------------
+//  -n-1 |   parameter 0   |                            ^
+//       |- - - - - - - - -|                            |
+//  -n   |                 |                          Caller
+//  ...  |       ...       |                       frame slots
+//  -2   |  parameter n-1  |                       (slot < 0)
+//       |- - - - - - - - -|                            |
+//  -1   |   parameter n   |                            v
+//  -----+-----------------+--------------------------------
+//   0   |   return addr   |   ^                        ^
+//       |- - - - - - - - -|   |                        |
+//   1   | saved frame ptr | Fixed                      |
+//       |- - - - - - - - -| Header <-- frame ptr       |
+//   2   | [Constant Pool] |   |                        |
+//       |- - - - - - - - -|   |                        |
+// 2+cp  |     Context     |   |   if a constant pool   |
+//       |- - - - - - - - -|   |    is used, cp = 1,    |
+// 3+cp  |JSFunction/Marker|   v   otherwise, cp = 0    |
+//       +-----------------+----                        |
+// 4+cp  |                 |   ^                      Callee
+//       |- - - - - - - - -|   |                   frame slots
+//  ...  |                 | Frame slots           (slot >= 0)
+//       |- - - - - - - - -|   |                        |
+//       |                 |   v                        |
+//  -----+-----------------+----- <-- stack ptr -------------
+//
 
 class StandardFrameConstants : public AllStatic {
  public:
@@ -178,24 +215,47 @@
  public:
   // Fixed frame includes new.target, bytecode array, and bytecode offset.
   static const int kFixedFrameSize =
-      StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
   static const int kFixedFrameSizeFromFp =
-      StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+      StandardFrameConstants::kFixedFrameSizeFromFp + 3 * kPointerSize;
 
   // FP-relative.
-  static const int kRegisterFilePointerFromFp =
+  static const int kNewTargetFromFp =
+      -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
+  static const int kBytecodeArrayFromFp =
+      -StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
+  static const int kBytecodeOffsetFromFp =
       -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
+  static const int kRegisterFilePointerFromFp =
+      -StandardFrameConstants::kFixedFrameSizeFromFp - 4 * kPointerSize;
+
+  static const int kExpressionsOffset = kRegisterFilePointerFromFp;
+
+  // Expression indices for {StandardFrame::GetExpressionAddress}.
+  static const int kBytecodeArrayExpressionIndex = -2;
+  static const int kBytecodeOffsetExpressionIndex = -1;
+  static const int kRegisterFileExpressionIndex = 0;
 
   // Register file pointer relative.
   static const int kLastParamFromRegisterPointer =
-      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
 
   static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
-  static const int kNewTargetFromRegisterPointer = 2 * kPointerSize;
-  static const int kFunctionFromRegisterPointer = 3 * kPointerSize;
-  static const int kContextFromRegisterPointer = 4 * kPointerSize;
+  static const int kBytecodeArrayFromRegisterPointer = 2 * kPointerSize;
+  static const int kNewTargetFromRegisterPointer = 3 * kPointerSize;
+  static const int kFunctionFromRegisterPointer = 4 * kPointerSize;
+  static const int kContextFromRegisterPointer = 5 * kPointerSize;
 };
 
+inline static int FPOffsetToFrameSlot(int frame_offset) {
+  return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
+         frame_offset / kPointerSize;
+}
+
+inline static int FrameSlotToFPOffset(int slot) {
+  return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
+         kPointerSize;
+}
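
These two converters encode the diagram above: slots 0 and 1 (return address
and saved frame pointer) sit at non-negative offsets from fp, and everything
below fp gets increasing slot numbers. A standalone round-trip check,
assuming kFixedSlotCountAboveFp == 2 and 4-byte pointers (illustrative, not
the real platform constants):

#include <cassert>

static const int kPointerSize = 4;
static const int kFixedSlotCountAboveFp = 2;

int FPOffsetToFrameSlot(int frame_offset) {
  return kFixedSlotCountAboveFp - 1 - frame_offset / kPointerSize;
}

int FrameSlotToFPOffset(int slot) {
  return (kFixedSlotCountAboveFp - 1 - slot) * kPointerSize;
}

int main() {
  assert(FPOffsetToFrameSlot(0) == 1);              // fp -> saved frame ptr
  assert(FPOffsetToFrameSlot(kPointerSize) == 0);   // fp + 4 -> return addr
  assert(FrameSlotToFPOffset(2) == -kPointerSize);  // first slot below fp
  assert(FPOffsetToFrameSlot(FrameSlotToFPOffset(7)) == 7);  // round-trip
  return 0;
}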
 
 // Abstract base class for all stack frames.
 class StackFrame BASE_EMBEDDED {
@@ -249,6 +309,7 @@
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
   bool is_exit() const { return type() == EXIT; }
   bool is_optimized() const { return type() == OPTIMIZED; }
+  bool is_interpreted() const { return type() == INTERPRETED; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }
   bool is_stub_failure_trampoline() const {
@@ -485,7 +546,6 @@
   inline Object* GetExpression(int index) const;
   inline void SetExpression(int index, Object* value);
   int ComputeExpressionsCount() const;
-  static Object* GetExpression(Address fp, int index);
 
   void SetCallerFp(Address caller_fp) override;
 
@@ -516,8 +576,7 @@
   void IterateExpressions(ObjectVisitor* v) const;
 
   // Returns the address of the n'th expression stack element.
-  Address GetExpressionAddress(int n) const;
-  static Address GetExpressionAddress(Address fp, int n);
+  virtual Address GetExpressionAddress(int n) const;
 
   // Determines if the standard frame for the given frame pointer is
   // an arguments adaptor frame.
@@ -538,14 +597,14 @@
 
 class FrameSummary BASE_EMBEDDED {
  public:
-  FrameSummary(Object* receiver, JSFunction* function, Code* code, int offset,
+  FrameSummary(Object* receiver, JSFunction* function,
+               AbstractCode* abstract_code, int code_offset,
                bool is_constructor);
 
   Handle<Object> receiver() { return receiver_; }
   Handle<JSFunction> function() { return function_; }
-  Handle<Code> code() { return code_; }
-  Address pc() { return code_->address() + offset_; }
-  int offset() { return offset_; }
+  Handle<AbstractCode> abstract_code() { return abstract_code_; }
+  int code_offset() { return code_offset_; }
   bool is_constructor() { return is_constructor_; }
 
   void Print();
@@ -553,8 +612,8 @@
  private:
   Handle<Object> receiver_;
   Handle<JSFunction> function_;
-  Handle<Code> code_;
-  int offset_;
+  Handle<AbstractCode> abstract_code_;
+  int code_offset_;
   bool is_constructor_;
 };
 
@@ -617,9 +676,12 @@
   virtual void Summarize(List<FrameSummary>* frames);
 
   // Lookup exception handler for current {pc}, returns -1 if none found. Also
-  // returns the expected number of stack slots at the handler site.
+  // returns data associated with the handler site specific to the frame type:
+  //  - JavaScriptFrame : Data is the stack depth at entry of the try-block.
+  //  - OptimizedFrame  : Data is the stack slot count of the entire frame.
+  //  - InterpretedFrame: Data is the register index holding the context.
   virtual int LookupExceptionHandlerInTable(
-      int* stack_slots, HandlerTable::CatchPrediction* prediction);
+      int* data, HandlerTable::CatchPrediction* prediction);
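
The meaning of the |data| out-parameter is easy to lose track of across the
three overrides; a self-contained sketch (not V8 code, enum and function
names are illustrative) of the per-frame-type contract documented above:

#include <cstdio>

enum FrameKind { kFullCodegen, kOptimized, kInterpreted };

const char* DescribeHandlerData(FrameKind kind) {
  switch (kind) {
    case kFullCodegen: return "stack depth at entry of the try-block";
    case kOptimized:   return "stack slot count of the entire frame";
    case kInterpreted: return "register index holding the context";
  }
  return "";  // unreachable
}

int main() {
  std::printf("interpreted: %s\n", DescribeHandlerData(kInterpreted));
  return 0;
}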
 
   // Architecture-specific register description.
   static Register fp_register();
@@ -691,10 +753,9 @@
 
   void Summarize(List<FrameSummary>* frames) override;
 
-  // Lookup exception handler for current {pc}, returns -1 if none found. Also
-  // returns the expected number of stack slots at the handler site.
+  // Lookup exception handler for current {pc}, returns -1 if none found.
   int LookupExceptionHandlerInTable(
-      int* stack_slots, HandlerTable::CatchPrediction* prediction) override;
+      int* data, HandlerTable::CatchPrediction* prediction) override;
 
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
 
@@ -711,11 +772,38 @@
 
 
 class InterpretedFrame : public JavaScriptFrame {
+ public:
   Type type() const override { return INTERPRETED; }
 
+  // Lookup exception handler for current {pc}, returns -1 if none found.
+  int LookupExceptionHandlerInTable(
+      int* data, HandlerTable::CatchPrediction* prediction) override;
+
+  // Returns the current offset into the bytecode stream.
+  int GetBytecodeOffset() const;
+
+  // Updates the current offset into the bytecode stream, mainly used for stack
+  // unwinding to continue execution at a different bytecode offset.
+  void PatchBytecodeOffset(int new_offset);
+
+  // Returns the frame's current bytecode array.
+  Object* GetBytecodeArray() const;
+
+  // Updates the frame's BytecodeArray with |bytecode_array|. Used by the
+  // debugger to swap execution onto a BytecodeArray patched with breakpoints.
+  void PatchBytecodeArray(Object* bytecode_array);
+
+  // Access to the interpreter register file for this frame.
+  Object* GetInterpreterRegister(int register_index) const;
+
+  // Build a list with summaries for this frame including all inlined frames.
+  void Summarize(List<FrameSummary>* frames) override;
+
  protected:
   inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
 
+  Address GetExpressionAddress(int n) const override;
+
  private:
   friend class StackFrameIteratorBase;
 };
@@ -740,6 +828,8 @@
   void Print(StringStream* accumulator, PrintMode mode,
              int index) const override;
 
+  static int GetLength(Address fp);
+
  protected:
   inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
 
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 25be8a6..6e6a655 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -19,8 +19,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
@@ -77,6 +76,7 @@
   }
 
  private:
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
@@ -110,13 +110,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop-at");
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
     __ ldr(r2, MemOperand(sp, receiver_offset));
@@ -137,6 +130,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -264,21 +258,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ mov(RestParamAccessDescriptor::parameter_count(),
-           Operand(Smi::FromInt(num_parameters)));
-    __ add(RestParamAccessDescriptor::parameter_pointer(), fp,
-           Operand(StandardFrameConstants::kCallerSPOffset + offset));
-    __ mov(RestParamAccessDescriptor::rest_parameter_index(),
-           Operand(Smi::FromInt(rest_index)));
-    function_in_register_r1 = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register_r1) {
+      __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register_r1 = false;
     SetVar(rest_param, r0, r1, r2);
   }
 
@@ -286,28 +271,20 @@
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register_r1) {
       // Load this again, if it's used by the local context below.
       __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
-           Operand(Smi::FromInt(num_parameters)));
-    __ add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
-           Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(r1);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, r0, r1, r2);
   }
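
The rewritten block above replaces the old ArgumentsAccessStub type
computation with an explicit three-way dispatch. A sketch of the decision
(enum names are illustrative; the comments name the stub or runtime call
each branch corresponds to in the diff):

enum ArgumentsPath { kStrictStub, kGenericRuntime, kSloppyStub };

ArgumentsPath ChooseArgumentsPath(bool strict_or_complex_parameters,
                                  bool has_duplicate_parameters) {
  if (strict_or_complex_parameters)
    return kStrictStub;       // FastNewStrictArgumentsStub
  if (has_duplicate_parameters)
    return kGenericRuntime;   // Runtime::kNewSloppyArguments_Generic
  return kSloppyStub;         // FastNewSloppyArgumentsStub
}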
@@ -439,6 +416,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ b(pl, &ok);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(r0);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(r0);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
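
The weight fed to EmitProfilingCounterDecrement approximates how much work
the function did: either a self-optimization budget or a value proportional
to code size. A sketch with assumed flag values (the real ones come from
FLAG_interrupt_budget, FLAG_self_opt_count, and the per-architecture
kCodeSizeMultiplier):

#include <algorithm>

int ComputeReturnWeight(bool should_self_optimize, int pc_offset) {
  const int kInterruptBudget = 0x1800;  // assumed default
  const int kSelfOptCount = 130;        // assumed default
  const int kMaxBackEdgeWeight = 127;   // assumed
  const int kCodeSizeMultiplier = 149;  // assumed ARM value
  if (should_self_optimize) return kInterruptBudget / kSelfOptCount;
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, pc_offset / kCodeSizeMultiplier));
}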
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -452,24 +453,7 @@
       __ push(r0);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ b(pl, &ok);
-    __ push(r0);
-    __ Call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(r0);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     // Make sure that the constant pool is not emitted inside of the return
     // sequence.
@@ -492,7 +476,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -509,7 +493,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
   __ LoadRoot(result_register(), index);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -544,7 +528,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
   // Immediates cannot be pushed directly.
   __ mov(result_register(), Operand(lit));
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -553,7 +537,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -578,41 +562,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ str(reg, MemOperand(sp, 0));
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -643,7 +600,7 @@
   __ bind(materialize_false);
   __ LoadRoot(ip, Heap::kFalseValueRootIndex);
   __ bind(&done);
-  __ push(ip);
+  codegen()->PushOperand(ip);
 }
 
 
@@ -665,7 +622,7 @@
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ LoadRoot(ip, value_root_index);
-  __ push(ip);
+  codegen()->PushOperand(ip);
 }
 
 
@@ -789,7 +746,7 @@
   // The variable in the declaration always resides in the current function
   // context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
@@ -908,11 +865,11 @@
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
       __ mov(r2, Operand(variable->name()));
-      __ Push(r2);
+      PushOperand(r2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -986,8 +943,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1010,7 +967,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ b(nested_statement.break_label());
   } else {
@@ -1041,25 +998,21 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  __ b(eq, &exit);
-  Register null_value = r5;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ cmp(r0, null_value);
-  __ b(eq, &exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(r0, &convert);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
   __ b(ge, &done_convert);
+  __ CompareRoot(r0, Heap::kNullValueRootIndex);
+  __ b(eq, &exit);
+  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ b(eq, &exit);
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -1067,16 +1020,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(r0);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
-  __ b(eq, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1087,7 +1038,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(r0);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1125,15 +1076,17 @@
   // We got a fixed array in register r0. Iterate through that.
   __ bind(&fixed_array);
 
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(r1);
   __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  int vector_index = SmiFromSlot(slot)->value();
   __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
   __ mov(r1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(r1, r0);  // Smi and array
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ Push(r1);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ mov(r0, Operand(Smi::FromInt(0)));
-  __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
+  __ Push(r0);  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1161,6 +1114,16 @@
   __ cmp(r4, Operand(r2));
   __ b(eq, &update_each);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we can
+  // no longer use the enum cache, i.e. we have left fast mode. Better record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function without rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(r0);
+  __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ str(r2, FieldMemOperand(r0, FixedArray::OffsetOfElementAt(vector_index)));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1200,7 +1163,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ Drop(5);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1444,12 +1407,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ mov(r1, Operand(var->name()));
-      __ Push(cp, r1);  // Context and name.
+      __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(r0);
@@ -1474,7 +1436,7 @@
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
     __ LoadRoot(r1, Heap::kNullValueRootIndex);
-    __ push(r1);
+    PushOperand(r1);
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1519,7 +1481,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(r0);  // Save result on stack
+      PushOperand(r0);  // Save result on stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1551,7 +1513,7 @@
         }
         // Duplicate receiver on stack.
         __ ldr(r0, MemOperand(sp));
-        __ push(r0);
+        PushOperand(r0);
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1559,19 +1521,19 @@
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
           __ mov(r0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes
-          __ push(r0);
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(r0);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         // Duplicate receiver on stack.
         __ ldr(r0, MemOperand(sp));
-        __ push(r0);
+        PushOperand(r0);
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1595,13 +1557,13 @@
        it != accessor_table.end();
        ++it) {
     __ ldr(r0, MemOperand(sp));  // Duplicate receiver.
-    __ push(r0);
+    PushOperand(r0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ mov(r0, Operand(Smi::FromInt(NONE)));
-    __ push(r0);
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(r0);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1618,18 +1580,18 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(r0);  // Save result on the stack
+      PushOperand(r0);  // Save result on the stack
       result_saved = true;
     }
 
     __ ldr(r0, MemOperand(sp));  // Duplicate receiver.
-    __ push(r0);
+    PushOperand(r0);
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1644,11 +1606,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ mov(r0, Operand(Smi::FromInt(NONE)));
-            __ push(r0);
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1657,15 +1619,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ mov(r0, Operand(Smi::FromInt(NONE)));
-          __ push(r0);
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ mov(r0, Operand(Smi::FromInt(NONE)));
-          __ push(r0);
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1724,14 +1684,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(r0);
+      PushOperand(r0);
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1752,21 +1712,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(r0);
+    PopOperand(r0);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(r0);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(r0);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1807,12 +1762,12 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = r1;
         __ ldr(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch);
-        __ Push(result_register());
+        PushOperand(scratch);
+        PushOperand(result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY:
@@ -1821,14 +1776,14 @@
       VisitForStackValue(
           property->obj()->AsSuperPropertyReference()->home_object());
       VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = r1;
         __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch);
+        PushOperand(scratch);
         __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch);
-        __ Push(result_register());
+        PushOperand(scratch);
+        PushOperand(result_register());
       }
       break;
     case KEYED_PROPERTY:
@@ -1874,7 +1829,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(r0);  // Left operand goes on the stack.
+    PushOperand(r0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1940,8 +1895,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ pop(r1);
+      __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+      __ b(ne, &resume);
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1959,7 +1922,7 @@
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1968,123 +1931,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
-      __ str(r1, FieldMemOperand(result_register(),
-                                 JSGeneratorObject::kContinuationOffset));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-      __ b(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
-      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));          // iter
-      __ Push(load_name, r3, r0);                       // "throw", iter, except
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(r0);                                        // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(r0);                                       // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ ldr(r0, MemOperand(sp, generator_object_depth));
-      __ push(r0);                                       // g
-      __ Push(Smi::FromInt(handler_index));              // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
-      __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
-      __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
-      __ mov(r1, cp);
-      __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ pop(r0);                                      // result
-      EmitReturnSequence();
-      __ bind(&l_resume);                              // received in r0
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ bind(&l_next);
-
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
-      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));         // iter
-      __ Push(load_name, r3, r0);                      // "next", iter, received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ ldr(load_receiver, MemOperand(sp, kPointerSize));
-      __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(r1, r0);
-      __ str(r1, MemOperand(sp, 2 * kPointerSize));
-      SetCallPosition(expr);
-      __ mov(r0, Operand(1));
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ Move(load_receiver, r0);
-
-      __ push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // r0=result.done
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ b(ne, &l_try);
-
-      // result.value
-      __ pop(load_receiver);                                 // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                         // r0=result.value
-      context()->DropAndPlug(2, r0);                         // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
@@ -2098,7 +1953,14 @@
   // r1 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(r1);
+  PopOperand(r1);
+
+  // Store input value into generator object.
+  __ str(result_register(),
+         FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
+  __ mov(r2, result_register());
+  __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r2, r3,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
@@ -2160,6 +2022,7 @@
       __ add(r3, r3, r2);
       __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
       __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+      __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
       __ Jump(r3);
     }
     __ bind(&slow_resume);
@@ -2174,6 +2037,7 @@
   __ push(r2);
   __ b(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   DCHECK(!result_register().is(r1));
   __ Push(r1, result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2185,6 +2049,25 @@
   context()->Plug(result_register());
 }
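
Both resume paths now push the resume mode before jumping into the
continuation, and the continuation (see the kSuspend case above) pops it to
decide whether to keep executing or to box the input value and return. A
sketch of that handshake; the enum mirrors JSGeneratorObject's resume modes,
names and values assumed:

enum ResumeMode { kNext, kReturn, kThrow };

// Mirrors the cmp against JSGeneratorObject::RETURN at the continuation.
bool ContinuationShouldReturn(ResumeMode pushed_mode) {
  return pushed_mode == kReturn;
}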
 
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ sub(r0, fp, sp);
+    __ cmp(r0, Operand(expected_diff));
+    __ Assert(eq, kUnexpectedStackDepth);
+  }
+}
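
EmitOperandStackDepthCheck asserts the bookkeeping invariant behind all the
PushOperand/PopOperand calls in this file: everything between fp and sp is
either fixed frame header or a tracked operand. A standalone check with
assumed constants:

#include <cassert>
#include <cstdint>

static const int kPointerSize = 4;                          // assumed 32-bit
static const int kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed

int main() {
  uintptr_t fp = 0x1000;
  int operand_stack_depth = 3;  // what the code generator would have tracked
  uintptr_t sp = fp - kFixedFrameSizeFromFp -
                 operand_stack_depth * kPointerSize;
  // The same expression the emitted Assert checks at run time.
  assert(static_cast<int>(fp - sp) ==
         kFixedFrameSizeFromFp + operand_stack_depth * kPointerSize);
  return 0;
}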
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2218,37 +2101,7 @@
   __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
   __ mov(LoadDescriptor::SlotRegister(),
          Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ mov(LoadDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2264,7 +2117,7 @@
   // Get the arguments.
   Register left = r1;
   Register right = r0;
-  __ pop(left);
+  PopOperand(left);
 
   // Perform combined smi check on both operands.
   __ orr(scratch1, left, Operand(right));
@@ -2273,8 +2126,7 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
@@ -2347,27 +2199,17 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in r0.
-  DCHECK(lit != NULL);
-  __ push(r0);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = r1;
-  __ ldr(scratch,
-         FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
-  __ push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
+    Register scratch = r1;
     if (property->is_static()) {
       __ ldr(scratch, MemOperand(sp, kPointerSize));  // constructor
     } else {
       __ ldr(scratch, MemOperand(sp, 0));  // prototype
     }
-    __ push(scratch);
+    PushOperand(scratch);
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
     // The static prototype property is read only. We handle the non computed
@@ -2390,36 +2232,31 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(r0);
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(r0);
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ pop(r1);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(r1);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2442,10 +2279,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(r0);  // Preserve value.
+      PushOperand(r0);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), r0);
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
       EmitLoadStoreICSlot(slot);
@@ -2453,7 +2290,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(r0);
+      PushOperand(r0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2470,7 +2307,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(r0);
+      PushOperand(r0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2490,12 +2327,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(r0);  // Preserve value.
+      PushOperand(r0);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), r0);
-      __ Pop(StoreDescriptor::ValueRegister(),
-             StoreDescriptor::ReceiverRegister());
+      PopOperands(StoreDescriptor::ValueRegister(),
+                  StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2578,17 +2415,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ push(r0);  // Value.
-      __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(language_mode())));
-      __ Push(cp, r1, r0);  // Context, name, language mode.
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(var->name());
+      __ Push(r0);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, r1);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ ldr(r2, location);
         __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2634,7 +2471,7 @@
 
   __ mov(StoreDescriptor::NameRegister(),
          Operand(prop->key()->AsLiteral()->value()));
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2651,10 +2488,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(r0);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(r0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2664,16 +2502,17 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(r0);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(r0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
-  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  PopOperands(StoreDescriptor::ReceiverRegister(),
+              StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(r0));
 
   Handle<Code> ic =
@@ -2708,7 +2547,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), r0);
-      __ pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2747,7 +2586,7 @@
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ push(ip);
+    PushOperand(ip);
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2758,7 +2597,7 @@
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
     __ ldr(ip, MemOperand(sp, 0));
-    __ push(ip);
+    PushOperand(ip);
     __ str(r0, MemOperand(sp, kPointerSize));
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2781,12 +2620,11 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(r0);
-  __ Push(r0);
+  PushOperand(r0);
+  PushOperand(r0);
   __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
-  __ Push(scratch);
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperand(scratch);
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
@@ -2794,8 +2632,7 @@
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ str(r0, MemOperand(sp, kPointerSize));
@@ -2824,7 +2661,7 @@
 
   // Push the target function under the receiver.
   __ ldr(ip, MemOperand(sp, 0));
-  __ push(ip);
+  PushOperand(ip);
   __ str(r0, MemOperand(sp, kPointerSize));
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2843,12 +2680,11 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(r0);
-  __ Push(r0);
+  PushOperand(r0);
+  PushOperand(r0);
   __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
-  __ Push(scratch);
+  PushOperand(scratch);
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2856,8 +2692,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ str(r0, MemOperand(sp, kPointerSize));
@@ -2879,12 +2714,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2929,11 +2775,9 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in r0)
     // and the object holding it (returned in r1).
-    DCHECK(!context_register().is(r2));
-    __ mov(r2, Operand(callee->name()));
-    __ Push(context_register(), r2);
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(r0, r1);  // Function, receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(r0, r1);  // Function, receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
@@ -2955,7 +2799,7 @@
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    __ push(r2);  // Reserved receiver slot.
+    PushOperand(r2);  // Reserved receiver slot.
   }
 }
 
@@ -2989,7 +2833,10 @@
   SetCallPosition(expr);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ mov(r0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3030,6 +2877,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3050,7 +2898,7 @@
          FieldMemOperand(result_register(), HeapObject::kMapOffset));
   __ ldr(result_register(),
          FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  __ Push(result_register());
+  PushOperand(result_register());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -3072,6 +2920,7 @@
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -3124,76 +2973,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r1, SIMD128_VALUE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r2, FIRST_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(hs, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-  __ cmp(r2, Operand(0x80000000));
-  __ cmp(r1, Operand(0x00000000), eq);
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
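
The deleted EmitIsMinusZero above relies on the IEEE-754 bit pattern of -0.0:
the HeapNumber's exponent word must be exactly 0x80000000 (sign bit only) and
its mantissa word must be zero. The same test in portable C++, as a standalone
sketch:

    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // exponent word (r2)
      uint32_t lo = static_cast<uint32_t>(bits);        // mantissa word (r1)
      return hi == 0x80000000u && lo == 0u;  // the two cmp checks above
    }
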
@@ -3282,64 +3061,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(r1);
-  __ cmp(r0, r1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in r1 and the formal
-  // parameter count in r0.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(r1, r0);
-  __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  // Get the number of formal parameters.
-  __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
-
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3408,28 +3129,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r1, JS_DATE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3441,7 +3140,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value);
@@ -3474,7 +3173,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value);
@@ -3497,34 +3196,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(r1);  // r0 = value. r1 = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(r1, &done);
-
-  // If the object is not a value type, return the value.
-  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
-  __ b(ne, &done);
-
-  // Store the value.
-  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(r2, r0);
-  __ RecordWriteField(
-      r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3542,26 +3213,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into r0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  Label convert, done_convert;
-  __ JumpIfSmi(r0, &convert);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
-  __ b(ls, &done_convert);
-  __ bind(&convert);
-  __ Push(r0);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3590,7 +3241,7 @@
   Register index = r0;
   Register result = r3;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3636,7 +3287,7 @@
   Register scratch = r3;
   Register result = r0;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3686,6 +3337,7 @@
   // Call the target.
   __ mov(r0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3738,243 +3390,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator, non_trivial_array,
-      not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
-      one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(0));
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = r0;
-  Register elements = no_reg;  // Will be r0.
-  Register result = no_reg;  // Will be r0.
-  Register separator = r1;
-  Register array_length = r2;
-  Register result_pos = no_reg;  // Will be r2.
-  Register string_length = r3;
-  Register string = r4;
-  Register element = r5;
-  Register elements_end = r6;
-  Register scratch = r9;
-
-  // Separator operand is on the stack.
-  __ pop(separator);
-
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
-  __ b(ne, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch, array_length, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length, SetCC);
-  __ b(ne, &non_trivial_array);
-  __ LoadRoot(r0, Heap::kempty_stringRootIndex);
-  __ b(&done);
-
-  __ bind(&non_trivial_array);
-
-  // Get the FixedArray containing array's elements.
-  elements = array;
-  __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-  array = no_reg;  // End of array's live range.
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ mov(string_length, Operand::Zero());
-  __ add(element,
-         elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
-  // Loop condition: while (element < elements_end).
-  // Live values in registers:
-  //   elements: Fixed array of strings.
-  //   array_length: Length of the fixed array of strings (not smi)
-  //   separator: Separator string
-  //   string_length: Accumulated sum of string lengths (smi).
-  //   element: Current array element.
-  //   elements_end: Array end.
-  if (generate_debug_code_) {
-    __ cmp(array_length, Operand::Zero());
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ JumpIfSmi(string, &bailout);
-  __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
-  __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-  __ add(string_length, string_length, Operand(scratch), SetCC);
-  __ b(vs, &bailout);
-  __ cmp(element, elements_end);
-  __ b(lt, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, Operand(1));
-  __ b(ne, &not_size_one_array);
-  __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ b(&done);
-
-  __ bind(&not_size_one_array);
-
-  // Live values in registers:
-  //   separator: Separator string
-  //   array_length: Length of the array.
-  //   string_length: Sum of string lengths (smi).
-  //   elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ JumpIfSmi(separator, &bailout);
-  __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
-
-  // Add (separator length times array_length) - separator length to the
-  // string_length to get the length of the result string. array_length is not
-  // a smi but the other values are, so the result is a smi.
-  __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, string_length, Operand(scratch));
-  __ smull(scratch, ip, array_length, scratch);
-  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
-  // zero.
-  __ cmp(ip, Operand::Zero());
-  __ b(ne, &bailout);
-  __ tst(scratch, Operand(0x80000000));
-  __ b(ne, &bailout);
-  __ add(string_length, string_length, Operand(scratch), SetCC);
-  __ b(vs, &bailout);
-  __ SmiUntag(string_length);
-
-  // Bailout for large object allocations.
-  __ cmp(string_length, Operand(Page::kMaxRegularHeapObjectSize));
-  __ b(gt, &bailout);
-
-  // Get first element in the array to free up the elements register to be used
-  // for the result.
-  __ add(element,
-         elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  result = elements;  // End of live range for elements.
-  elements = no_reg;
-  // Live values in registers:
-  //   element: First array element
-  //   separator: Separator string
-  //   string_length: Length of result string (not smi)
-  //   array_length: Length of the array.
-  __ AllocateOneByteString(result, string_length, scratch,
-                           string,        // used as scratch
-                           elements_end,  // used as scratch
-                           &bailout);
-  // Prepare for looping. Set up elements_end to end of the array. Set
-  // result_pos to the position of the result where to write the first
-  // character.
-  __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
-  result_pos = array_length;  // End of live range for array_length.
-  array_length = no_reg;
-  __ add(result_pos,
-         result,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // Check the length of the separator.
-  __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ cmp(scratch, Operand(Smi::FromInt(1)));
-  __ b(eq, &one_char_separator);
-  __ b(gt, &long_separator);
-
-  // Empty separator case
-  __ bind(&empty_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-
-  // Copy next array element to the result.
-  __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ add(string,
-         string,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ cmp(element, elements_end);
-  __ b(lt, &empty_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r0));
-  __ b(&done);
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator.
-  __ jmp(&one_char_separator_loop_entry);
-
-  __ bind(&one_char_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Single separator one-byte char (in lower byte).
-
-  // Copy the separator character to the result.
-  __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
-  // Copy next array element to the result.
-  __ bind(&one_char_separator_loop_entry);
-  __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ add(string,
-         string,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ cmp(element, elements_end);
-  __ b(lt, &one_char_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r0));
-  __ b(&done);
-
-  // Long separator case (separator is more than one character). Entry is at the
-  // label long_separator below.
-  __ bind(&long_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Separator string.
-
-  // Copy the separator to the result.
-  __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ add(string,
-         separator,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&long_separator);
-  __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ add(string,
-         string,
-         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ cmp(element, elements_end);
-  __ b(lt, &long_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r0));
-  __ b(&done);
-
-  __ bind(&bailout);
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ bind(&done);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
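
The deleted fast Array.join path above guards its length arithmetic with an
smull-based overflow check: the untagged array length is multiplied by the
smi-tagged separator length, and the upper 33 bits of the 64-bit product must
be clear, i.e. the product must still be a non-negative smi. Roughly, in
portable C++ (a sketch; the register names in comments refer to the deleted
code):

    #include <cstdint>

    bool SeparatorTotalFitsInSmi(int32_t array_length,
                                 int32_t separator_length) {
      int32_t separator_length_smi = separator_length << 1;  // smi tag
      int64_t product =
          static_cast<int64_t>(array_length) * separator_length_smi;
      int32_t hi = static_cast<int32_t>(product >> 32);  // lands in ip
      uint32_t lo = static_cast<uint32_t>(product);      // lands in scratch
      return hi == 0 && (lo & 0x80000000u) == 0;  // cmp ip; tst scratch
    }
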
@@ -4008,7 +3423,7 @@
   __ b(&done);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(r0);
@@ -4018,7 +3433,7 @@
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as the receiver.
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ push(r0);
+  PushOperand(r0);
 
   __ LoadNativeContextSlot(expr->context_index(), r0);
 }
@@ -4033,6 +3448,7 @@
   __ mov(r0, Operand(arg_count));
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -4046,7 +3462,7 @@
 
     // Push the target function under the receiver.
     __ ldr(ip, MemOperand(sp, 0));
-    __ push(ip);
+    PushOperand(ip);
     __ str(r0, MemOperand(sp, kPointerSize));
 
     // Push the arguments ("left-to-right").
@@ -4082,6 +3498,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(r0);
       }
     }
@@ -4099,9 +3516,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(r0);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4122,9 +3539,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          DCHECK(!context_register().is(r2));
-          __ mov(r2, Operand(var->name()));
-          __ Push(context_register(), r2);
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(r0);
         }
@@ -4169,6 +3584,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         __ LoadRoot(r0, Heap::kTrueValueRootIndex);
@@ -4219,7 +3635,7 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ mov(ip, Operand(Smi::FromInt(0)));
-      __ push(ip);
+      PushOperand(ip);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4234,11 +3650,11 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = r1;
         __ ldr(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch);
-        __ Push(result_register());
+        PushOperand(scratch);
+        PushOperand(result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4248,13 +3664,13 @@
         VisitForStackValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
         VisitForAccumulatorValue(prop->key());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = r1;
         __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch);
+        PushOperand(scratch);
         __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch);
-        __ Push(result_register());
+        PushOperand(scratch);
+        PushOperand(result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4338,7 +3754,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(r0);
+          PushOperand(r0);
           break;
         case NAMED_PROPERTY:
           __ str(r0, MemOperand(sp, kPointerSize));
@@ -4363,8 +3779,7 @@
 
   SetExpressionPosition(expr);
 
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
-                                              strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4397,7 +3812,7 @@
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4433,8 +3848,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(StoreDescriptor::ReceiverRegister(),
-             StoreDescriptor::NameRegister());
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::NameRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4489,8 +3904,8 @@
     __ CompareRoot(r0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-    __ b(eq, if_true);
+    __ CompareRoot(r0, Heap::kNullValueRootIndex);
+    __ b(eq, if_false);
     __ JumpIfSmi(r0, if_false);
     // Check for undetectable objects => true.
     __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
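
The changed typeof check above reflects that, at this revision, the null
oddball's map also carries the undetectable bit, so typeof x == "undefined"
must reject null explicitly before testing the map. The predicate the new
sequence implements, sketched with this era's internal accessors (treat the
exact calls as illustrative):

    bool TypeofIsUndefined(Object* value) {
      if (value->IsNull()) return false;  // undetectable, but typeof null
                                          // must stay "object"
      if (value->IsSmi()) return false;   // smis are numbers
      // True for the undefined oddball and for undetectable objects such as
      // document.all, whose maps have the undetectable bit set.
      return HeapObject::cast(value)->map()->is_undetectable();
    }
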
@@ -4556,7 +3971,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -4564,7 +3979,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ pop(r1);
+      PopOperand(r1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4576,7 +3991,7 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cond = CompareIC::ComputeCondition(op);
-      __ pop(r1);
+      PopOperand(r1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4589,8 +4004,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4678,7 +4092,7 @@
     DCHECK(closure_scope->is_function_scope());
     __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
-  __ push(ip);
+  PushOperand(ip);
 }
 
 
@@ -4687,21 +4101,12 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   DCHECK(!result_register().is(r1));
-  // Store result register while executing finally block.
-  __ push(result_register());
-  // Cook return address in link register to stack (smi encoded Code* delta)
-  __ sub(r1, lr, Operand(masm_->CodeObject()));
-  __ SmiTag(r1);
-
-  // Store cooked return address while executing finally block.
-  __ push(r1);
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(ip, Operand(pending_message_obj));
   __ ldr(r1, MemOperand(ip));
-  __ push(r1);
+  PushOperand(r1);
 
   ClearPendingMessage();
 }
@@ -4710,19 +4115,11 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(r1));
   // Restore pending message from stack.
-  __ pop(r1);
+  PopOperand(r1);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(ip, Operand(pending_message_obj));
   __ str(r1, MemOperand(ip));
-
-  // Restore result register from stack.
-  __ pop(r1);
-
-  // Uncook return address and return.
-  __ pop(result_register());
-  __ SmiUntag(r1);
-  __ add(pc, r1, Operand(masm_->CodeObject()));
 }
 
 
@@ -4742,6 +4139,32 @@
          Operand(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(r1));
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(r1);                 // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ cmp(r1, Operand(Smi::FromInt(cmd.token)));
+    __ b(ne, &skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
 
 #undef __
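
EmitCommands above is the dispatch half of the new try-finally lowering: each
deferred return/throw/break/continue pushes a unique Smi token next to the
saved accumulator, and after the finally body this loop compares the popped
token against every recorded command. A plain-C++ model of that dispatch (the
struct layout is an assumption inferred from the fields used here):

    #include <vector>

    enum Command { kReturn, kThrow, kContinue, kBreak };

    struct DeferredCommand {
      Command command;
      int token;     // unique token pushed alongside the saved result
      void* target;  // break/continue target (a Statement* in the real class)
    };

    void DispatchDeferred(const std::vector<DeferredCommand>& commands,
                          int token) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != token) continue;  // __ cmp / __ b(ne, &skip)
        switch (cmd.command) {
          case kReturn:   /* EmitUnwindAndReturn() */           break;
          case kThrow:    /* push result; Runtime::kReThrow */  break;
          case kContinue: /* EmitContinue(cmd.target) */        break;
          case kBreak:    /* EmitBreak(cmd.target) */           break;
        }
      }
    }
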
 
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index e4141bb..d0278e7 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -20,7 +20,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
 
 class JumpPatchSite BASE_EMBEDDED {
  public:
@@ -76,6 +76,7 @@
   }
 
  private:
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
   Register reg_;
@@ -109,13 +110,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ Debug("stop-at", __LINE__, BREAK);
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = info->scope()->num_parameters() * kXRegSize;
     __ Peek(x10, receiver_offset);
@@ -141,7 +135,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
-
+    OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -267,21 +261,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ Mov(RestParamAccessDescriptor::parameter_count(),
-           Smi::FromInt(num_parameters));
-    __ Add(RestParamAccessDescriptor::parameter_pointer(), fp,
-           StandardFrameConstants::kCallerSPOffset + offset);
-    __ Mov(RestParamAccessDescriptor::rest_parameter_index(),
-           Smi::FromInt(rest_index));
-
-    function_in_register_x1 = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register_x1) {
+      __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register_x1 = false;
     SetVar(rest_param, x0, x1, x2);
   }
 
@@ -289,28 +274,20 @@
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register_x1) {
       // Load this again, if it's used by the local context below.
       __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ Mov(ArgumentsAccessNewDescriptor::parameter_count(),
-           Smi::FromInt(num_parameters));
-    __ Add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
-           StandardFrameConstants::kCallerSPOffset + offset);
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(x1);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, x0, x1, x2);
   }
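
The arguments-object allocation no longer routes through
ArgumentsAccessStub::ComputeType; the three-way choice is now made inline. The
decision table as a standalone sketch (only the stub and runtime names cited
in the diff are real; the enum is illustrative):

    enum class ArgumentsKind {
      kStrict,         // FastNewStrictArgumentsStub
      kSloppyGeneric,  // Runtime::kNewSloppyArguments_Generic
      kSloppyFast      // FastNewSloppyArgumentsStub
    };

    ArgumentsKind SelectArgumentsKind(bool strict_mode,
                                      bool simple_parameters,
                                      bool has_duplicate_parameters) {
      if (strict_mode || !simple_parameters) return ArgumentsKind::kStrict;
      if (has_duplicate_parameters) return ArgumentsKind::kSloppyGeneric;
      return ArgumentsKind::kSloppyFast;
    }
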
@@ -429,6 +406,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ B(pl, &ok);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ Push(x0);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ Pop(x0);
+  }
+  EmitProfilingCounterReset();
+  __ Bind(&ok);
+}
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -445,24 +446,7 @@
       __ CallRuntime(Runtime::kTraceExit);
       DCHECK(x0.Is(result_register()));
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ B(pl, &ok);
-    __ Push(x0);
-    __ Call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ Pop(x0);
-    EmitProfilingCounterReset();
-    __ Bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     SetReturnPosition(literal());
     const Register& current_sp = __ StackPointer();
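
The weight fed to EmitProfilingCounterDecrement scales the interrupt-check
budget with the amount of code emitted, clamped to [1, kMaxBackEdgeWeight];
for tail calls the counters are updated before the call because control never
returns here. A sketch of the computation with illustrative constants
(kCodeSizeMultiplier and kMaxBackEdgeWeight are per-architecture tuning
values; the ShouldSelfOptimize branch divides FLAG_interrupt_budget by
FLAG_self_opt_count instead):

    #include <algorithm>

    int ReturnSequenceWeight(int pc_offset) {
      const int kCodeSizeMultiplier = 149;  // illustrative value
      const int kMaxBackEdgeWeight = 127;   // illustrative clamp
      int distance = pc_offset + kCodeSizeMultiplier / 2;  // round to nearest
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kCodeSizeMultiplier));
    }
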
@@ -486,7 +470,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
-  __ Push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -504,7 +488,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
   __ LoadRoot(result_register(), index);
-  __ Push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -537,7 +521,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
   // Immediates cannot be pushed directly.
   __ Mov(result_register(), Operand(lit));
-  __ Push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -546,7 +530,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ B(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -571,41 +555,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ Poke(reg, 0);
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Mov(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -636,7 +593,7 @@
   __ Bind(materialize_false);
   __ LoadRoot(x10, Heap::kFalseValueRootIndex);
   __ Bind(&done);
-  __ Push(x10);
+  codegen()->PushOperand(x10);
 }
 
 
@@ -658,7 +615,7 @@
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ LoadRoot(x10, value_root_index);
-  __ Push(x10);
+  codegen()->PushOperand(x10);
 }
 
 
@@ -787,7 +744,7 @@
   // The variable in the declaration always resides in the current function
   // context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ CompareRoot(x1, Heap::kWithContextMapRootIndex);
@@ -908,11 +865,11 @@
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ Function Declaration");
       __ Mov(x2, Operand(variable->name()));
-      __ Push(x2);
+      PushOperand(x2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -989,8 +946,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1010,7 +967,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ Bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ B(nested_statement.break_label());
   } else {
@@ -1044,22 +1001,18 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
-  Register null_value = x15;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ Cmp(x0, null_value);
-  __ B(eq, &exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(x0, &convert);
   __ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, &done_convert, ge);
+  __ JumpIfRoot(x0, Heap::kNullValueRootIndex, &exit);
+  __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
   __ Bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -1067,15 +1020,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ Push(x0);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ JumpIfObjectType(x0, x10, x11, JS_PROXY_TYPE, &call_runtime, eq);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(x0, x15, x10, x11, x12, x13, &call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1086,7 +1038,7 @@
   // Get the set of properties to enumerate.
   __ Bind(&call_runtime);
   __ Push(x0);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1120,14 +1072,15 @@
   // We got a fixed array in register x0. Iterate through that.
   __ Bind(&fixed_array);
 
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(x1);
   __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  int vector_index = SmiFromSlot(slot)->value();
   __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
   __ Mov(x1, Smi::FromInt(1));  // Smi(1) indicates slow check.
   __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
-  // Smi and array, fixed array length (as smi) and initial index.
-  __ Push(x1, x0, x2, xzr);
+  __ Push(x1, x0, x2);  // Smi and array, fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+  __ Push(xzr);  // Initial index.
 
   // Generate code for doing the condition check.
   __ Bind(&loop);
@@ -1155,6 +1108,16 @@
   __ Cmp(x11, x2);
   __ B(eq, &update_each);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we left fast mode. So better record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function w/o rerunning the loop with the slow
+  // mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(x0);
+  __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ Str(x10, FieldMemOperand(x0, FixedArray::OffsetOfElementAt(vector_index)));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1193,7 +1156,7 @@
 
   // Remove the pointers stored on the stack.
   __ Bind(loop_statement.break_label());
-  __ Drop(5);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1432,12 +1395,11 @@
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ Bind(&slow);
       Comment cmnt(masm_, "Lookup variable");
-      __ Mov(x1, Operand(var->name()));
-      __ Push(cp, x1);  // Context and name.
+      __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ Bind(&done);
       context()->Plug(x0);
@@ -1463,7 +1425,7 @@
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
     __ LoadRoot(x10, Heap::kNullValueRootIndex);
-    __ Push(x10);
+    PushOperand(x10);
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1508,7 +1470,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ Push(x0);  // Save result on stack
+      PushOperand(x0);  // Save result on stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1539,7 +1501,7 @@
           break;
         }
         __ Peek(x0, 0);
-        __ Push(x0);
+        PushOperand(x0);
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1547,19 +1509,19 @@
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
           __ Mov(x0, Smi::FromInt(SLOPPY));  // Language mode
-          __ Push(x0);
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(x0);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         DCHECK(property->emit_store());
         // Duplicate receiver on stack.
         __ Peek(x0, 0);
-        __ Push(x0);
+        PushOperand(x0);
         VisitForStackValue(value);
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1582,13 +1544,13 @@
        it != accessor_table.end();
        ++it) {
       __ Peek(x10, 0);  // Duplicate receiver.
-      __ Push(x10);
+      PushOperand(x10);
       VisitForStackValue(it->first);
       EmitAccessor(it->second->getter);
       EmitAccessor(it->second->setter);
       __ Mov(x10, Smi::FromInt(NONE));
-      __ Push(x10);
-      __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+      PushOperand(x10);
+      CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1605,18 +1567,18 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ Push(x0);  // Save result on stack
+      PushOperand(x0);  // Save result on stack
       result_saved = true;
     }
 
     __ Peek(x10, 0);  // Duplicate receiver.
-    __ Push(x10);
+    PushOperand(x10);
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1631,11 +1593,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ Mov(x0, Smi::FromInt(NONE));
-            __ Push(x0);
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1644,15 +1606,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ Mov(x0, Smi::FromInt(NONE));
-          __ Push(x0);
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ Mov(x0, Smi::FromInt(NONE));
-          __ Push(x0);
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1709,14 +1669,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ Push(x0);
+      PushOperand(x0);
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1737,21 +1697,16 @@
   // (inclusive) and these elements gets appended to the array. Note that the
   // number elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(x0);
+    PopOperand(x0);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(x0);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(x0);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1792,11 +1747,11 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = x10;
         __ Peek(scratch, kPointerSize);
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY:
@@ -1805,13 +1760,13 @@
       VisitForStackValue(
           property->obj()->AsSuperPropertyReference()->home_object());
       VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch1 = x10;
         const Register scratch2 = x11;
         __ Peek(scratch1, 2 * kPointerSize);
         __ Peek(scratch2, kPointerSize);
-        __ Push(scratch1, scratch2, result_register());
+        PushOperands(scratch1, scratch2, result_register());
       }
       break;
     case KEYED_PROPERTY:
@@ -1856,7 +1811,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ Push(x0);  // Left operand goes on the stack.
+    PushOperand(x0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1911,38 +1866,7 @@
   __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
   __ Mov(LoadDescriptor::SlotRegister(),
          SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  // Call keyed load IC. It has arguments key and receiver in x0 and x1.
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ Mov(LoadDescriptor::SlotRegister(),
-         SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -1956,7 +1880,7 @@
   Register left = x1;
   Register right = x0;
   Register result = x0;
-  __ Pop(left);
+  PopOperand(left);
 
   // Perform combined smi check on both operands.
   __ Orr(x10, left, right);
@@ -1965,8 +1889,7 @@
 
   __ Bind(&stub_call);
 
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   {
     Assembler::BlockPoolsScope scope(masm_);
     CallIC(code, expr->BinaryOperationFeedbackId());
@@ -2047,9 +1970,8 @@
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ Pop(x1);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(x1);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // Unbound, signals no inlined smi code.
   {
     Assembler::BlockPoolsScope scope(masm_);
@@ -2061,27 +1983,17 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in x0.
-  DCHECK(lit != NULL);
-  __ push(x0);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = x1;
-  __ Ldr(scratch,
-         FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
-  __ Push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
+    Register scratch = x1;
     if (property->is_static()) {
       __ Peek(scratch, kPointerSize);  // constructor
     } else {
       __ Peek(scratch, 0);  // prototype
     }
-    __ Push(scratch);
+    PushOperand(scratch);
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
     // The static prototype property is read only. We handle the non computed
@@ -2104,29 +2016,25 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ Mov(x0, Smi::FromInt(DONT_ENUM));
-        __ Push(x0);
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ Mov(x0, Smi::FromInt(DONT_ENUM));
-        __ Push(x0);
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
@@ -2145,12 +2053,12 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ Push(x0);  // Preserve value.
+      PushOperand(x0);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
       // this copy.
       __ Mov(StoreDescriptor::ReceiverRegister(), x0);
-      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ Mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
       EmitLoadStoreICSlot(slot);
@@ -2158,7 +2066,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(x0);
+      PushOperand(x0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2175,7 +2083,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(x0);
+      PushOperand(x0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2195,12 +2103,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Push(x0);  // Preserve value.
+      PushOperand(x0);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Mov(StoreDescriptor::NameRegister(), x0);
-      __ Pop(StoreDescriptor::ReceiverRegister(),
-             StoreDescriptor::ValueRegister());
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::ValueRegister());
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2281,14 +2189,11 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ Mov(x11, Operand(var->name()));
-      __ Mov(x10, Smi::FromInt(language_mode()));
-      // jssp[0]  : mode.
-      // jssp[8]  : name.
-      // jssp[16] : context.
-      // jssp[24] : value.
-      __ Push(x0, cp, x11, x10);
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(var->name());
+      __ Push(x0);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
@@ -2338,7 +2243,7 @@
 
   __ Mov(StoreDescriptor::NameRegister(),
          Operand(prop->key()->AsLiteral()->value()));
-  __ Pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2355,10 +2260,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(x0);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(x0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
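A pattern this patch leans on (here with kStoreToSuper, above with the new kStoreLookupSlot_Strict/_Sloppy split): the language mode is baked into the runtime function id instead of being pushed as an extra Smi argument, which removes one operand per call site. A minimal stand-in, with simplified enums in place of V8's real LanguageMode and Runtime::FunctionId types:

    // Simplified stand-ins; the real types live in V8's runtime machinery.
    enum LanguageMode { SLOPPY, STRICT };
    enum RuntimeId { kStoreToSuper_Sloppy, kStoreToSuper_Strict };

    RuntimeId StoreToSuperId(LanguageMode mode) {
      // Mirrors: is_strict(language_mode()) ? _Strict : _Sloppy
      return mode == STRICT ? kStoreToSuper_Strict : kStoreToSuper_Sloppy;
    }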
 
 
@@ -2368,10 +2274,10 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(x0);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(x0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
@@ -2380,7 +2286,8 @@
   // Assignment to a property, using a keyed store IC.
 
   // TODO(all): Could we pass this in registers rather than on the stack?
-  __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
+  PopOperands(StoreDescriptor::NameRegister(),
+              StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(x0));
 
   Handle<Code> ic =
@@ -2414,7 +2321,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), x0);
-      __ Pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2456,7 +2363,7 @@
       UseScratchRegisterScope temps(masm_);
       Register temp = temps.AcquireX();
       __ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
-      __ Push(temp);
+      PushOperand(temp);
     }
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
@@ -2467,8 +2374,8 @@
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
-    __ Pop(x10);
-    __ Push(x0, x10);
+    PopOperand(x10);
+    PushOperands(x0, x10);
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
 
@@ -2493,19 +2400,18 @@
       callee->AsProperty()->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(x0);
+  PushOperand(x0);
   __ Peek(scratch, kPointerSize);
-  __ Push(x0, scratch);
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperands(x0, scratch);
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
   //  - this (receiver)
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  //  - key
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ Poke(x0, kPointerSize);
@@ -2534,8 +2440,8 @@
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
   // Push the target function under the receiver.
-  __ Pop(x10);
-  __ Push(x0, x10);
+  PopOperand(x10);
+  PushOperands(x0, x10);
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
 }
@@ -2555,11 +2461,10 @@
       callee->AsProperty()->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(x0);
+  PushOperand(x0);
   __ Peek(scratch, kPointerSize);
-  __ Push(x0, scratch);
+  PushOperands(x0, scratch);
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2567,8 +2472,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ Poke(x0, kPointerSize);
@@ -2591,13 +2495,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ Peek(x1, (arg_count + 1) * kXRegSize);
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
   // Restore context register.
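Two bookkeeping points in the hunk above, restated in isolation (illustrative, not V8 code): a tail call never returns to this frame, so profiling counters are flushed before the call rather than in the normal return sequence, and the call IC consumes its arguments plus the function slot pushed beneath them:

    // Slots the call IC pops from the operand stack; hence the
    // OperandStackDepthDecrement(arg_count + 1) after CallIC(ic).
    int SlotsConsumedByCallIC(int arg_count) {
      return arg_count + 1;  // pushed arguments + target function slot
    }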
@@ -2644,10 +2558,9 @@
     __ Bind(&slow);
     // Call the runtime to find the function to call (returned in x0)
     // and the object holding it (returned in x1).
-    __ Mov(x10, Operand(callee->name()));
-    __ Push(context_register(), x10);
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(x0, x1);  // Receiver, function.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(x0, x1);  // Receiver, function.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
@@ -2668,7 +2581,7 @@
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
     __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-    __ Push(x10);  // Reserved receiver slot.
+    PushOperand(x10);  // Reserved receiver slot.
   }
 }
 
@@ -2705,7 +2618,10 @@
   // Call the evaluated function.
   __ Peek(x1, (arg_count + 1) * kXRegSize);
   __ Mov(x0, arg_count);
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2746,6 +2662,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2767,7 +2684,7 @@
          FieldMemOperand(result_register(), HeapObject::kMapOffset));
   __ Ldr(result_register(),
          FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  __ Push(result_register());
+  PushOperand(result_register());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2789,6 +2706,7 @@
   __ Peek(x1, arg_count * kXRegSize);
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -2840,77 +2758,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(x0, if_false);
-  __ CompareObjectType(x0, x10, x11, SIMD128_VALUE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(x0, if_false);
-  __ CompareObjectType(x0, x10, x11, FIRST_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(hs, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Only a HeapNumber can be -0.0, so return false if we have something else.
-  __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
-
-  // Test the bit pattern.
-  __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
-  __ Cmp(x10, 1);   // Set V on 0x8000000000000000.
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(vs, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2999,65 +2846,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ Pop(x1);
-  __ Cmp(x0, x1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in x1.
-  VisitForAccumulatorValue(args->at(0));
-  __ Mov(x1, x0);
-  __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-  Label exit;
-  // Get the number of formal parameters.
-  __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
-  __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(ne, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ Bind(&exit);
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitClassOf");
   ZoneList<Expression*>* args = expr->arguments();
@@ -3130,28 +2918,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(x0, if_false);
-  __ CompareObjectType(x0, x10, x11, JS_DATE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3164,7 +2930,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(value, index);
+  PopOperands(value, index);
 
   if (FLAG_debug_code) {
     __ AssertSmi(value, kNonSmiValue);
@@ -3194,7 +2960,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(value, index);
+  PopOperands(value, index);
 
   if (FLAG_debug_code) {
     __ AssertSmi(value, kNonSmiValue);
@@ -3212,35 +2978,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ Pop(x1);
-  // x0 = value.
-  // x1 = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(x1, &done);
-
-  // If the object is not a value type, return the value.
-  __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
-
-  // Store the value.
-  __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
-  // Update the write barrier. Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ Mov(x10, x0);
-  __ RecordWriteField(
-      x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-
-  __ Bind(&done);
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3258,25 +2995,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into x0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  Label convert, done_convert;
-  __ JumpIfSmi(x0, &convert);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &done_convert, ls);
-  __ Bind(&convert);
-  __ Push(x0);
-  __ CallRuntime(Runtime::kToName);
-  __ Bind(&done_convert);
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3310,7 +3028,7 @@
   Register index = x0;
   Register result = x3;
 
-  __ Pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3355,7 +3073,7 @@
   Register index = x0;
   Register result = x0;
 
-  __ Pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3405,6 +3123,7 @@
   // Call the target.
   __ Mov(x0, argc);
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3457,226 +3176,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
-
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(0));
-
-  Register array = x0;
-  Register result = x0;
-  Register elements = x1;
-  Register element = x2;
-  Register separator = x3;
-  Register array_length = x4;
-  Register result_pos = x5;
-  Register map = x6;
-  Register string_length = x10;
-  Register elements_end = x11;
-  Register string = x12;
-  Register scratch1 = x13;
-  Register scratch2 = x14;
-  Register scratch3 = x7;
-  Register separator_length = x15;
-
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      empty_separator_loop, one_char_separator_loop,
-      one_char_separator_loop_entry, long_separator_loop;
-
-  // The separator operand is on the stack.
-  __ Pop(separator);
-
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(map, scratch1, &bailout);
-
-  // If the array has length zero, return the empty string.
-  // Load and untag the length of the array.
-  // It is an unsigned value, so we can skip sign extension.
-  // We assume little endianness.
-  __ Ldrsw(array_length,
-           UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
-  __ Cbnz(array_length, &non_trivial_array);
-  __ LoadRoot(result, Heap::kempty_stringRootIndex);
-  __ B(&done);
-
-  __ Bind(&non_trivial_array);
-  // Get the FixedArray containing array's elements.
-  __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths.
-  __ Mov(string_length, 0);
-  __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
-  // Loop condition: while (element < elements_end).
-  // Live values in registers:
-  //   elements: Fixed array of strings.
-  //   array_length: Length of the fixed array of strings (not smi)
-  //   separator: Separator string
-  //   string_length: Accumulated sum of string lengths (not smi).
-  //   element: Current array element.
-  //   elements_end: Array end.
-  if (FLAG_debug_code) {
-    __ Cmp(array_length, 0);
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ Bind(&loop);
-  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ JumpIfSmi(string, &bailout);
-  __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-  __ Ldrsw(scratch1,
-           UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
-  __ Adds(string_length, string_length, scratch1);
-  __ B(vs, &bailout);
-  __ Cmp(element, elements_end);
-  __ B(lt, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ Cmp(array_length, 1);
-  __ B(ne, &not_size_one_array);
-  __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ B(&done);
-
-  __ Bind(&not_size_one_array);
-
-  // Live values in registers:
-  //   separator: Separator string
-  //   array_length: Length of the array (not smi).
-  //   string_length: Sum of string lengths (not smi).
-  //   elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ JumpIfSmi(separator, &bailout);
-  __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
-  // Add (separator length times array_length) - separator length to the
-  // string_length to get the length of the result string.
-  // Load the separator length as untagged.
-  // We assume little endianness, and that the length is positive.
-  __ Ldrsw(separator_length,
-           UntagSmiFieldMemOperand(separator,
-                                   SeqOneByteString::kLengthOffset));
-  __ Sub(string_length, string_length, separator_length);
-  __ Umaddl(string_length, array_length.W(), separator_length.W(),
-            string_length);
-
-  // Bailout for large object allocations.
-  __ Cmp(string_length, Page::kMaxRegularHeapObjectSize);
-  __ B(gt, &bailout);
-
-  // Get first element in the array.
-  __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  // Live values in registers:
-  //   element: First array element
-  //   separator: Separator string
-  //   string_length: Length of result string (not smi)
-  //   array_length: Length of the array (not smi).
-  __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
-                           &bailout);
-
-  // Prepare for looping. Set up elements_end to end of the array. Set
-  // result_pos to the position of the result where to write the first
-  // character.
-  // TODO(all): useless unless AllocateOneByteString trashes the register.
-  __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
-  __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
-  // Check the length of the separator.
-  __ Cmp(separator_length, 1);
-  __ B(eq, &one_char_separator);
-  __ B(gt, &long_separator);
-
-  // Empty separator case
-  __ Bind(&empty_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-
-  // Copy next array element to the result.
-  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ Ldrsw(string_length,
-           UntagSmiFieldMemOperand(string, String::kLengthOffset));
-  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(result_pos, string, string_length, scratch1);
-  __ Cmp(element, elements_end);
-  __ B(lt, &empty_separator_loop);  // End while (element < elements_end).
-  __ B(&done);
-
-  // One-character separator case
-  __ Bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ B(&one_char_separator_loop_entry);
-
-  __ Bind(&one_char_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Single separator one-byte char (in lower byte).
-
-  // Copy the separator character to the result.
-  __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
-
-  // Copy next array element to the result.
-  __ Bind(&one_char_separator_loop_entry);
-  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ Ldrsw(string_length,
-           UntagSmiFieldMemOperand(string, String::kLengthOffset));
-  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(result_pos, string, string_length, scratch1);
-  __ Cmp(element, elements_end);
-  __ B(lt, &one_char_separator_loop);  // End while (element < elements_end).
-  __ B(&done);
-
-  // Long separator case (separator is more than one character). Entry is at the
-  // label long_separator below.
-  __ Bind(&long_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Separator string.
-
-  // Copy the separator to the result.
-  // TODO(all): hoist next two instructions.
-  __ Ldrsw(string_length,
-           UntagSmiFieldMemOperand(separator, String::kLengthOffset));
-  __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(result_pos, string, string_length, scratch1);
-
-  __ Bind(&long_separator);
-  __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
-  __ Ldrsw(string_length,
-           UntagSmiFieldMemOperand(string, String::kLengthOffset));
-  __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(result_pos, string, string_length, scratch1);
-  __ Cmp(element, elements_end);
-  __ B(lt, &long_separator_loop);  // End while (element < elements_end).
-  __ B(&done);
-
-  __ Bind(&bailout);
-  // Returning undefined will force slower code to handle it.
-  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-  __ Bind(&done);
-  context()->Plug(result);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3721,7 +3220,7 @@
   __ B(&done);
 
   __ Bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ Bind(&done);
   context()->Plug(x0);
@@ -3731,7 +3230,7 @@
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as the receiver.
   __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-  __ Push(x0);
+  PushOperand(x0);
 
   __ LoadNativeContextSlot(expr->context_index(), x0);
 }
@@ -3746,6 +3245,7 @@
   __ Mov(x0, arg_count);
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -3758,8 +3258,8 @@
     EmitLoadJSRuntimeFunction(expr);
 
     // Push the target function under the receiver.
-    __ Pop(x10);
-    __ Push(x0, x10);
+    PopOperand(x10);
+    PushOperands(x0, x10);
 
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
@@ -3793,6 +3293,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(x0);
       }
     }
@@ -3810,9 +3311,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(x0);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -3833,8 +3334,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          __ Mov(x2, Operand(var->name()));
-          __ Push(context_register(), x2);
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(x0);
         }
@@ -3876,6 +3376,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
 
         __ Bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
@@ -3928,7 +3429,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ Push(xzr);
+      PushOperand(xzr);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -3943,10 +3444,10 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = x10;
         __ Peek(scratch, kPointerSize);
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -3956,12 +3457,12 @@
         VisitForStackValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
         VisitForAccumulatorValue(prop->key());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch1 = x10;
         const Register scratch2 = x11;
         __ Peek(scratch1, 2 * kPointerSize);
         __ Peek(scratch2, kPointerSize);
-        __ Push(scratch1, scratch2, result_register());
+        PushOperands(scratch1, scratch2, result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4044,7 +3545,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ Push(x0);
+          PushOperand(x0);
           break;
         case NAMED_PROPERTY:
           __ Poke(x0, kXRegSize);
@@ -4070,9 +3571,7 @@
 
   {
     Assembler::BlockPoolsScope scope(masm_);
-    Handle<Code> code =
-        CodeFactory::BinaryOpIC(isolate(), Token::ADD,
-                                strength(language_mode())).code();
+    Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
     CallIC(code, expr->CountBinOpFeedbackId());
     patch_site.EmitPatchInfo();
   }
@@ -4106,7 +3605,7 @@
     case NAMED_PROPERTY: {
       __ Mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
-      __ Pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4142,8 +3641,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(StoreDescriptor::NameRegister());
-      __ Pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::NameRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4204,7 +3703,7 @@
   } else if (String::Equals(check, factory->undefined_string())) {
     ASM_LOCATION(
         "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
-    __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+    __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_false);
     __ JumpIfSmi(x0, if_false);
     // Check for undetectable objects => true.
     __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
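The explicit null check added above appears to be needed because, in this version of V8, the null oddball's map also carries the undetectable bit, so typeof must rule null out before testing that bit. A hedged restatement of the predicate being emitted (an assumption about the map flags, not code from the patch):

    // typeof x == "undefined" holds for undefined and for undetectable
    // objects, but not for null, even though null's map is undetectable.
    bool TypeofIsUndefined(bool is_null, bool is_smi, bool map_undetectable) {
      if (is_null) return false;  // JumpIfRoot(x0, kNullValueRootIndex, if_false)
      if (is_smi) return false;   // JumpIfSmi(x0, if_false)
      return map_undetectable;    // bit test on the map's bit field
    }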
@@ -4274,7 +3773,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(x0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -4282,7 +3781,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ Pop(x1);
+      PopOperand(x1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4296,7 +3795,7 @@
       Condition cond = CompareIC::ComputeCondition(op);
 
       // Pop the stack value.
-      __ Pop(x1);
+      PopOperand(x1);
 
       JumpPatchSite patch_site(masm_);
       if (ShouldInlineSmiCase(op)) {
@@ -4307,8 +3806,7 @@
         __ Bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4385,8 +3883,16 @@
       // looks at its pos(). Is it possible to do something more efficient here,
       // perhaps using Adr?
       __ Bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ B(&resume);
+      __ Pop(x1);
+      __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
+      __ B(ne, &resume);
+      __ Push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
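Per the comment above, the continuation now pops a resume-mode Smi that EmitGeneratorResume pushes before transferring control. A plain-C++ model of the dispatch (enum names are the ones the patch uses; their numeric values are not assumed):

    // Illustrative only: RETURN boxes the sent value as {value, done: true}
    // and unwinds; NEXT and THROW fall through to the normal resume path.
    enum ResumeMode { NEXT, RETURN, THROW };

    bool TakesReturnPath(ResumeMode mode) {
      // Mirrors: Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN)); B(ne, &resume)
      return mode == RETURN;
    }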
 
       __ Bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -4404,7 +3910,7 @@
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ Bind(&post_runtime);
-      __ Pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ Bind(&resume);
@@ -4413,127 +3919,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
-      __ Str(x1, FieldMemOperand(result_register(),
-                                 JSGeneratorObject::kContinuationOffset));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-      __ B(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ Bind(&l_catch);
-      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
-      __ Peek(x3, 1 * kPointerSize);                         // iter
-      __ Push(load_name, x3, x0);                       // "throw", iter, except
-      __ B(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ Bind(&l_try);
-      __ Pop(x0);                                        // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ Push(x0);                                       // result
-
-      __ B(&l_suspend);
-      // TODO(jbramley): This label is bound here because the following code
-      // looks at its pos(). Is it possible to do something more efficient here,
-      // perhaps using Adr?
-      __ Bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ B(&l_resume);
-
-      __ Bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ Peek(x0, generator_object_depth);
-      __ Push(x0);                                       // g
-      __ Push(Smi::FromInt(handler_index));              // handler-index
-      DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
-      __ Mov(x1, Smi::FromInt(l_continuation.pos()));
-      __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
-      __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
-      __ Mov(x1, cp);
-      __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Pop(x0);                                        // result
-      EmitReturnSequence();
-      __ Bind(&l_resume);                                // received in x0
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ Bind(&l_next);
-
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
-      __ Peek(x3, 1 * kPointerSize);                        // iter
-      __ Push(load_name, x3, x0);                      // "next", iter, received
-
-      // result = receiver[f](arg);
-      __ Bind(&l_call);
-      __ Peek(load_receiver, 1 * kPointerSize);
-      __ Peek(load_name, 2 * kPointerSize);
-      __ Mov(LoadDescriptor::SlotRegister(),
-             SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ Mov(x1, x0);
-      __ Poke(x1, 2 * kPointerSize);
-      SetCallPosition(expr);
-      __ Mov(x0, 1);
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ Bind(&l_loop);
-      __ Move(load_receiver, x0);
-
-      __ Push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ Mov(LoadDescriptor::SlotRegister(),
-             SmiFromSlot(expr->DoneFeedbackSlot()));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // x0=result.done
-      // The ToBooleanStub argument (result.done) is in x0.
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ B(ne, &l_try);
-
-      // result.value
-      __ Pop(load_receiver);                                 // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ Mov(LoadDescriptor::SlotRegister(),
-             SmiFromSlot(expr->ValueFeedbackSlot()));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                         // x0=result.value
-      context()->DropAndPlug(2, x0);                         // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
@@ -4553,7 +3947,14 @@
   // will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ Pop(generator_object);
+  PopOperand(generator_object);
+
+  // Store input value into generator object.
+  __ Str(result_register(),
+         FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
+  __ Mov(x2, result_register());
+  __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x2, x3,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ Ldr(cp, FieldMemOperand(generator_object,
@@ -4610,6 +4011,7 @@
     __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
     __ Str(x12, FieldMemOperand(generator_object,
                                 JSGeneratorObject::kContinuationOffset));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ Br(x10);
 
     __ Bind(&slow_resume);
@@ -4620,6 +4022,7 @@
   __ PushMultipleTimes(the_hole, operand_stack_size);
 
   __ Mov(x10, Smi::FromInt(resume_mode));
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   __ Push(generator_object, result_register(), x10);
   __ CallRuntime(Runtime::kResumeJSGeneratorObject);
   // Not reached: the runtime call returns elsewhere.
@@ -4629,6 +4032,31 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3) {
+  OperandStackDepthIncrement(3);
+  __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ Sub(x0, fp, jssp);
+    __ Cmp(x0, Operand(expected_diff));
+    __ Assert(eq, kUnexpectedStackDepth);
+  }
+}
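The invariant this debug check asserts can be restated in isolation: the distance from fp down to jssp must equal the fixed frame slots plus one pointer per tracked operand. A standalone sketch; the constant values below are illustrative assumptions, the real ones come from V8's frame constants:

    #include <cstdint>

    constexpr intptr_t kPointerSize = 8;                          // arm64
    constexpr intptr_t kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed

    // True exactly when operand_stack_depth_ matches the machine stack.
    bool DepthMatchesFrame(intptr_t fp, intptr_t jssp, int operand_depth) {
      return fp - jssp == kFixedFrameSizeFromFp + operand_depth * kPointerSize;
    }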
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -4716,25 +4144,19 @@
     DCHECK(closure_scope->is_function_scope());
     __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
-  __ Push(x10);
+  PushOperand(x10);
 }
 
 
 void FullCodeGenerator::EnterFinallyBlock() {
   ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
   DCHECK(!result_register().is(x10));
-  // Preserve the result register while executing finally block.
-  // Also cook the return address in lr to the stack (smi encoded Code* delta).
-  __ Sub(x10, lr, Operand(masm_->CodeObject()));
-  __ SmiTag(x10);
-  __ Push(result_register(), x10);
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ Mov(x10, pending_message_obj);
   __ Ldr(x10, MemOperand(x10));
-  __ Push(x10);
+  PushOperand(x10);
 
   ClearPendingMessage();
 }
@@ -4745,19 +4167,11 @@
   DCHECK(!result_register().is(x10));
 
   // Restore pending message from stack.
-  __ Pop(x10);
+  PopOperand(x10);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ Mov(x13, pending_message_obj);
   __ Str(x10, MemOperand(x13));
-
-  // Restore result register and cooked return address from the stack.
-  __ Pop(x10, result_register());
-
-  // Uncook the return address (see EnterFinallyBlock).
-  __ SmiUntag(x10);
-  __ Add(x11, x10, Operand(masm_->CodeObject()));
-  __ Br(x11);
 }
 
 
@@ -4776,6 +4190,30 @@
   __ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  __ Pop(result_register(), x1);  // Restore the accumulator and get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ Cmp(x1, Operand(Smi::FromInt(cmd.token)));
+    __ B(ne, &skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
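A plain-C++ rendering of the dispatch EmitCommands generates: each deferred break/continue/return/rethrow recorded while unwinding through a finally block gets a unique Smi token, and the finally epilogue compares the live token against each recorded command. Illustrative only; the enum names follow the patch, the loop models the emitted compare-and-skip chain:

    #include <vector>

    enum Command { kReturn, kThrow, kContinue, kBreak };
    struct DeferredCommand { Command command; int token; };

    int Dispatch(int live_token, const std::vector<DeferredCommand>& commands) {
      for (const DeferredCommand& cmd : commands) {
        if (live_token != cmd.token) continue;  // assembly: B(ne, &skip)
        return cmd.command;  // generated code emits the recorded transfer here
      }
      return -1;  // no deferred command matched; normal fall-through
    }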
 
 #undef __
 
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index 416a69c..8255089 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -17,6 +17,7 @@
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
 #include "src/snapshot/snapshot.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -27,6 +28,7 @@
   Isolate* isolate = info->isolate();
 
   TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
+  TRACE_EVENT0("v8", "V8.CompileFullCode");
 
   // Ensure that the feedback vector is large enough.
   info->EnsureFeedbackVector();
@@ -127,7 +129,7 @@
     table->SetRangeStart(i, handler_table_[i].range_start);
     table->SetRangeEnd(i, handler_table_[i].range_end);
     table->SetRangeHandler(i, handler_table_[i].handler_offset, prediction);
-    table->SetRangeDepth(i, handler_table_[i].stack_depth);
+    table->SetRangeData(i, handler_table_[i].stack_depth);
   }
   code->set_handler_table(*table);
 }
@@ -143,13 +145,11 @@
 
 bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
     ObjectLiteral* expr) const {
-  int literal_flags = expr->ComputeFlags();
   // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
   // support copy-on-write (COW) elements for now.
   // TODO(mvstanton): make object literals support COW elements.
-  return masm()->serializer_enabled() ||
-         literal_flags != ObjectLiteral::kShallowProperties ||
-         literal_flags != ObjectLiteral::kFastElements ||
+  return masm()->serializer_enabled() || !expr->fast_elements() ||
+         !expr->has_shallow_properties() ||
          expr->properties_count() >
              FastCloneShallowObjectStub::kMaximumClonedProperties;
 }
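The replaced predicate was a genuine tautology: for distinct flag values A and B, (flags != A || flags != B) is true for every flags, so the fast-clone stub was effectively never selected. A compile-time demonstration with assumed flag values (the real constants live in ObjectLiteral):

    constexpr int kShallowProperties = 1;  // assumed value
    constexpr int kFastElements = 2;       // assumed value

    constexpr bool OldCheck(int flags) {
      return flags != kShallowProperties || flags != kFastElements;
    }
    static_assert(OldCheck(kShallowProperties), "true even for shallow");
    static_assert(OldCheck(kFastElements), "true even for fast elements");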
@@ -165,14 +165,7 @@
 
 void FullCodeGenerator::Initialize() {
   InitializeAstVisitor(info_->isolate());
-  // The generation of debug code must match between the snapshot code and the
-  // code that is generated later.  This is assumed by the debugger when it is
-  // calculating PC offsets after generating a debug version of code.  Therefore
-  // we disable the production of debug code in the full compiler if we are
-  // either generating a snapshot or we booted from a snapshot.
-  generate_debug_code_ = FLAG_debug_code && !masm_->serializer_enabled() &&
-                         !info_->isolate()->snapshot_available();
-  masm_->set_emit_debug_code(generate_debug_code_);
+  masm_->set_emit_debug_code(FLAG_debug_code);
   masm_->set_predictable_code_size(true);
 }
 
@@ -183,10 +176,8 @@
 
 
 void FullCodeGenerator::CallLoadIC(TypeofMode typeof_mode,
-                                   LanguageMode language_mode,
                                    TypeFeedbackId id) {
-  Handle<Code> ic =
-      CodeFactory::LoadIC(isolate(), typeof_mode, language_mode).code();
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), typeof_mode).code();
   CallIC(ic, id);
 }
 
@@ -281,7 +272,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
-  __ Push(reg);
+  codegen()->PushOperand(reg);
 }
 
 
@@ -295,14 +286,36 @@
 
 void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
 
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
+  DCHECK(count > 0);
+  codegen()->DropOperands(count);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count, Register reg) const {
+  DCHECK(count > 0);
+  codegen()->DropOperands(count);
+  __ Move(result_register(), reg);
+}
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  DCHECK(count > 0);
+  // For simplicity we always test the accumulator register.
+  codegen()->DropOperands(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
-  __ Drop(1);
+  codegen()->DropOperands(1);
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
-  __ Pop(result_register());
+  codegen()->PopOperand(result_register());
 }
 
 
@@ -312,7 +325,7 @@
 
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
-  __ Pop(result_register());
+  codegen()->PopOperand(result_register());
   codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
   codegen()->DoTest(this);
 }
@@ -442,6 +455,47 @@
          DeclareGlobalsLanguageMode::encode(language_mode());
 }
 
+void FullCodeGenerator::PushOperand(Handle<Object> handle) {
+  OperandStackDepthIncrement(1);
+  __ Push(handle);
+}
+
+void FullCodeGenerator::PushOperand(Smi* smi) {
+  OperandStackDepthIncrement(1);
+  __ Push(smi);
+}
+
+void FullCodeGenerator::PushOperand(Register reg) {
+  OperandStackDepthIncrement(1);
+  __ Push(reg);
+}
+
+void FullCodeGenerator::PopOperand(Register reg) {
+  OperandStackDepthDecrement(1);
+  __ Pop(reg);
+}
+
+void FullCodeGenerator::DropOperands(int count) {
+  OperandStackDepthDecrement(count);
+  __ Drop(count);
+}
+
+void FullCodeGenerator::CallRuntimeWithOperands(Runtime::FunctionId id) {
+  OperandStackDepthDecrement(Runtime::FunctionForId(id)->nargs);
+  __ CallRuntime(id);
+}
+
+void FullCodeGenerator::OperandStackDepthIncrement(int count) {
+  DCHECK_GE(count, 0);
+  DCHECK_GE(operand_stack_depth_, 0);
+  operand_stack_depth_ += count;
+}
+
+void FullCodeGenerator::OperandStackDepthDecrement(int count) {
+  DCHECK_GE(count, 0);
+  DCHECK_GE(operand_stack_depth_, count);
+  operand_stack_depth_ -= count;
+}
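A minimal model of the shadow counter these wrappers maintain: every push, pop, drop, and runtime call flows through a wrapper, so the compiler statically knows how many operands sit above the fixed frame at any point. Illustrative only, under the assumption that runtime calls consume exactly their declared nargs:

    #include <cassert>

    struct OperandDepthTracker {
      int depth = 0;
      void Increment(int count) { assert(count >= 0); depth += count; }
      void Decrement(int count) {
        assert(count >= 0 && depth >= count);  // mirrors the DCHECK_GE pair
        depth -= count;
      }
      void CallRuntime(int nargs) { Decrement(nargs); }  // args are consumed
    };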
 
 void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
@@ -452,6 +506,7 @@
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  OperandStackDepthDecrement(3);
   context()->Plug(result_register());
 }
 
@@ -466,19 +521,20 @@
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
+  OperandStackDepthDecrement(4);
   context()->Plug(result_register());
 }
 
 
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
+  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-
-  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
   __ CallStub(&stub);
+  OperandStackDepthDecrement(2);
   context()->Plug(result_register());
 }
 
@@ -502,7 +558,7 @@
     __ Move(callable.descriptor().GetRegisterParameter(last),
             result_register());
     for (int i = last; i-- > 0;) {
-      __ Pop(callable.descriptor().GetRegisterParameter(i));
+      PopOperand(callable.descriptor().GetRegisterParameter(i));
     }
   }
   __ Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -520,6 +576,11 @@
 }
 
 
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+  EmitIntrinsicAsStubCall(expr, CodeFactory::ToName(isolate()));
+}
+
+
 void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
   EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
 }
@@ -630,6 +691,13 @@
 }
 
 
+void FullCodeGenerator::EmitGeneratorReturn(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::RETURN);
+}
+
+
 void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 2);
@@ -843,10 +911,7 @@
   PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
 }
 
-
-void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
-  Comment cmnt(masm_,  "[ ContinueStatement");
-  SetStatementPosition(stmt);
+void FullCodeGenerator::EmitContinue(Statement* target) {
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
   int context_length = 0;
@@ -855,7 +920,15 @@
   // try...finally on our way out, we will unconditionally preserve the
   // accumulator on the stack.
   ClearAccumulator();
-  while (!current->IsContinueTarget(stmt->target())) {
+  while (!current->IsContinueTarget(target)) {
+    if (current->IsTryFinally()) {
+      Comment cmnt(masm(), "[ Deferred continue through finally");
+      current->Exit(&stack_depth, &context_length);
+      DCHECK_EQ(0, stack_depth);
+      DCHECK_EQ(0, context_length);
+      current->AsTryFinally()->deferred_commands()->RecordContinue(target);
+      return;
+    }
     current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
@@ -871,10 +944,13 @@
   __ jmp(current->AsIteration()->continue_label());
 }
 
-
-void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
-  Comment cmnt(masm_,  "[ BreakStatement");
+void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  Comment cmnt(masm_, "[ ContinueStatement");
   SetStatementPosition(stmt);
+  EmitContinue(stmt->target());
+}
+
+void FullCodeGenerator::EmitBreak(Statement* target) {
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
   int context_length = 0;
@@ -883,7 +959,15 @@
   // try...finally on our way out, we will unconditionally preserve the
   // accumulator on the stack.
   ClearAccumulator();
-  while (!current->IsBreakTarget(stmt->target())) {
+  while (!current->IsBreakTarget(target)) {
+    if (current->IsTryFinally()) {
+      Comment cmnt(masm(), "[ Deferred break through finally");
+      current->Exit(&stack_depth, &context_length);
+      DCHECK_EQ(0, stack_depth);
+      DCHECK_EQ(0, context_length);
+      current->AsTryFinally()->deferred_commands()->RecordBreak(target);
+      return;
+    }
     current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
@@ -899,24 +983,62 @@
   __ jmp(current->AsBreakable()->break_label());
 }
 
+void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  Comment cmnt(masm_, "[ BreakStatement");
+  SetStatementPosition(stmt);
+  EmitBreak(stmt->target());
+}
 
-void FullCodeGenerator::EmitUnwindBeforeReturn() {
+void FullCodeGenerator::EmitUnwindAndReturn() {
   NestedStatement* current = nesting_stack_;
   int stack_depth = 0;
   int context_length = 0;
   while (current != NULL) {
+    if (current->IsTryFinally()) {
+      Comment cmnt(masm(), "[ Deferred return through finally");
+      current->Exit(&stack_depth, &context_length);
+      DCHECK_EQ(0, stack_depth);
+      DCHECK_EQ(0, context_length);
+      current->AsTryFinally()->deferred_commands()->RecordReturn();
+      return;
+    }
     current = current->Exit(&stack_depth, &context_length);
   }
   __ Drop(stack_depth);
+  EmitReturnSequence();
 }
 
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  // Stack: receiver, home_object.
+  SetExpressionPosition(prop);
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  PushOperand(key->value());
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+}
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+  SetExpressionPosition(prop);
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  __ Move(LoadDescriptor::SlotRegister(),
+          SmiFromSlot(prop->PropertyFeedbackSlot()));
+  CallIC(ic);
+}
+
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+  // Stack: receiver, home_object, key.
+  SetExpressionPosition(prop);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+}
 
 void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
                                         BailoutId bailout_id) {
   VisitForStackValue(property->key());
-  __ CallRuntime(Runtime::kToName);
+  CallRuntimeWithOperands(Runtime::kToName);
   PrepareForBailoutForId(bailout_id, NO_REGISTERS);
-  __ Push(result_register());
+  PushOperand(result_register());
 }
 
 
@@ -925,8 +1047,7 @@
   SetStatementPosition(stmt);
   Expression* expr = stmt->expression();
   VisitForAccumulatorValue(expr);
-  EmitUnwindBeforeReturn();
-  EmitReturnSequence();
+  EmitUnwindAndReturn();
 }
 
 
@@ -939,9 +1060,9 @@
   __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
   __ Call(callable.code(), RelocInfo::CODE_TARGET);
   PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
-  __ Push(result_register());
+  PushOperand(result_register());
   PushFunctionArgumentForContextAllocation();
-  __ CallRuntime(Runtime::kPushWithContext);
+  CallRuntimeWithOperands(Runtime::kPushWithContext);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
 
@@ -1129,16 +1250,15 @@
   Label try_entry, handler_entry, exit;
   __ jmp(&try_entry);
   __ bind(&handler_entry);
-  PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
   ClearPendingMessage();
 
   // Exception handler code, the exception is in the result register.
   // Extend the context before executing the catch block.
   { Comment cmnt(masm_, "[ Extend catch context");
-    __ Push(stmt->variable()->name());
-    __ Push(result_register());
+    PushOperand(stmt->variable()->name());
+    PushOperand(result_register());
     PushFunctionArgumentForContextAllocation();
-    __ CallRuntime(Runtime::kPushCatchContext);
+    CallRuntimeWithOperands(Runtime::kPushCatchContext);
     StoreToFrameField(StandardFrameConstants::kContextOffset,
                       context_register());
   }
@@ -1178,51 +1298,40 @@
   // executing the try body, and removing it again afterwards.
   //
   // The try-finally construct can enter the finally block in three ways:
-  // 1. By exiting the try-block normally. This removes the try-handler and
-  //    calls the finally block code before continuing.
+  // 1. By exiting the try-block normally. This exits the try block,
+  //    pushes the continuation token and falls through to the finally
+  //    block.
   // 2. By exiting the try-block with a function-local control flow transfer
-  //    (break/continue/return). The site of the, e.g., break removes the
-  //    try handler and calls the finally block code before continuing
-  //    its outward control transfer.
-  // 3. By exiting the try-block with a thrown exception.
-  //    This can happen in nested function calls. It traverses the try-handler
-  //    chain and consumes the try-handler entry before jumping to the
-  //    handler code. The handler code then calls the finally-block before
-  //    rethrowing the exception.
-  //
-  // The finally block must assume a return address on top of the stack
-  // (or in the link register on ARM chips) and a value (return value or
-  // exception) in the result register (rax/eax/r0), both of which must
-  // be preserved. The return address isn't GC-safe, so it should be
-  // cooked before GC.
+  //    (break/continue/return). The site of the break, for example, exits
+  //    the try block, pushes the continuation token and jumps to the
+  //    finally block. After the finally block executes, execution is
+  //    dispatched on the continuation token to a block that completes
+  //    the control flow transfer.
+  // 3. By exiting the try-block with a thrown exception. In the handler,
+  //    we push the exception and continuation token and jump to the
+  //    finally block (which will again dispatch based on the token once
+  //    it is finished).
+
   Label try_entry, handler_entry, finally_entry;
+  DeferredCommands deferred(this, &finally_entry);
 
   // Jump to try-handler setup and try-block code.
   __ jmp(&try_entry);
   __ bind(&handler_entry);
-  PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
 
   // Exception handler code.  This code is only executed when an exception
-  // is thrown.  The exception is in the result register, and must be
-  // preserved by the finally block.  Call the finally block and then
-  // rethrow the exception if it returns.
-  __ Call(&finally_entry);
-  __ Push(result_register());
-  __ CallRuntime(Runtime::kReThrow);
-
-  // Finally block implementation.
-  __ bind(&finally_entry);
-  EnterFinallyBlock();
-  { Finally finally_body(this);
-    Visit(stmt->finally_block());
+  // is thrown.  Record the continuation and jump to the finally block.
+  {
+    Comment cmt_handler(masm(), "[ Finally handler");
+    deferred.RecordThrow();
   }
-  ExitFinallyBlock();  // Return to the calling code.
 
   // Set up try handler.
   __ bind(&try_entry);
   int handler_index = NewHandlerTableEntry();
   EnterTryBlock(handler_index, &handler_entry);
-  { TryFinally try_body(this, &finally_entry);
+  {
+    TryFinally try_body(this, &deferred);
     Visit(stmt->try_block());
   }
   ExitTryBlock(handler_index);
@@ -1231,7 +1340,25 @@
   // finally block will unconditionally preserve the result register on the
   // stack.
   ClearAccumulator();
-  __ Call(&finally_entry);
+  deferred.EmitFallThrough();
+  // Fall through to the finally block.
+
+  // Finally block implementation.
+  __ bind(&finally_entry);
+  Comment cmnt_finally(masm(), "[ Finally block");
+  OperandStackDepthIncrement(2);  // Token and accumulator are on stack.
+  EnterFinallyBlock();
+  {
+    Finally finally_body(this);
+    Visit(stmt->finally_block());
+  }
+  ExitFinallyBlock();
+  OperandStackDepthDecrement(2);  // Token and accumulator were on stack.
+
+  {
+    Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
+    deferred.EmitCommands();  // Dispatch on the continuation token.
+  }
 }
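
The token protocol described in the comment above can be simulated end to end: every path into the finally block pushes a token (and the accumulator), the finally body runs, and a trailing dispatch switches on the token. A self-contained sketch, with made-up token values standing in for TokenDispenserForFinally's constants:

    #include <cassert>
    // Illustrative token values; the real ones come from TokenDispenserForFinally.
    enum Token { kFallThrough, kReturn, kThrow, kBreakOuterLoop };
    enum class Outcome { kNormal, kReturned, kRethrown, kBroke };
    // Models try { <some path> } finally { ... } plus the post-finally dispatch
    // that DeferredCommands::EmitCommands() emits.
    Outcome RunTryFinally(Token token) {
      // ...finally body runs here; token and accumulator stay on the stack...
      switch (token) {  // post-finally dispatch
        case kFallThrough:    return Outcome::kNormal;    // continue after the statement
        case kReturn:         return Outcome::kReturned;  // resume the deferred return
        case kThrow:          return Outcome::kRethrown;  // re-throw the saved exception
        case kBreakOuterLoop: return Outcome::kBroke;     // resume the deferred break
      }
      return Outcome::kNormal;
    }
    int main() {
      assert(RunTryFinally(kFallThrough) == Outcome::kNormal);
      assert(RunTryFinally(kThrow) == Outcome::kRethrown);
      return 0;
    }

Because the finally body no longer sits behind a call/return, nothing GC-unsafe (like a cooked return address) has to survive it; only the Smi token and the accumulator do.
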
 
 
@@ -1256,6 +1383,7 @@
   Label true_case, false_case, done;
   VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
 
+  int original_stack_depth = operand_stack_depth_;
   PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression());
@@ -1270,6 +1398,7 @@
     __ jmp(&done);
   }
 
+  operand_stack_depth_ = original_stack_depth;
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
   SetExpressionPosition(expr->else_expression());
@@ -1308,28 +1437,36 @@
     EnterBlockScopeIfNeeded block_scope_state(
         this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
 
-    if (lit->raw_name() != NULL) {
-      __ Push(lit->name());
-    } else {
-      __ Push(isolate()->factory()->undefined_value());
-    }
-
     if (lit->extends() != NULL) {
       VisitForStackValue(lit->extends());
     } else {
-      __ Push(isolate()->factory()->the_hole_value());
+      PushOperand(isolate()->factory()->the_hole_value());
     }
 
     VisitForStackValue(lit->constructor());
 
-    __ Push(Smi::FromInt(lit->start_position()));
-    __ Push(Smi::FromInt(lit->end_position()));
+    PushOperand(Smi::FromInt(lit->start_position()));
+    PushOperand(Smi::FromInt(lit->end_position()));
 
-    __ CallRuntime(Runtime::kDefineClass);
+    CallRuntimeWithOperands(Runtime::kDefineClass);
     PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
+    PushOperand(result_register());
+
+    // Load the "prototype" from the constructor.
+    __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+    __ LoadRoot(LoadDescriptor::NameRegister(),
+                Heap::kprototype_stringRootIndex);
+    __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
+    CallLoadIC(NOT_INSIDE_TYPEOF);
+    PrepareForBailoutForId(lit->PrototypeId(), TOS_REG);
+    PushOperand(result_register());
 
     EmitClassDefineProperties(lit);
 
+    // Set both the prototype and constructor to have fast properties, and also
+    // freeze them in strong mode.
+    CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
+
     if (lit->class_variable_proxy() != nullptr) {
       EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
                              lit->ProxySlot());
@@ -1343,35 +1480,8 @@
 void FullCodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
   Comment cmnt(masm_, "[ NativeFunctionLiteral");
-
-  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate());
-
-  // Compute the function template for the native function.
-  Handle<String> name = expr->name();
-  v8::Local<v8::FunctionTemplate> fun_template =
-      expr->extension()->GetNativeFunctionTemplate(v8_isolate,
-                                                   v8::Utils::ToLocal(name));
-  DCHECK(!fun_template.IsEmpty());
-
-  // Instantiate the function and create a shared function info from it.
-  Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
-      *fun_template->GetFunction(v8_isolate->GetCurrentContext())
-           .ToLocalChecked()));
-  const int literals = fun->NumberOfLiterals();
-  Handle<Code> code = Handle<Code>(fun->shared()->code());
-  Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared =
-      isolate()->factory()->NewSharedFunctionInfo(
-          name, literals, FunctionKind::kNormalFunction, code,
-          Handle<ScopeInfo>(fun->shared()->scope_info()),
-          Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
-  shared->set_construct_stub(*construct_stub);
-
-  // Copy the function data to the shared function info.
-  shared->set_function_data(fun->shared()->function_data());
-  int parameters = fun->shared()->internal_formal_parameter_count();
-  shared->set_internal_formal_parameter_count(parameters);
-
+      Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
   EmitNewClosure(shared, false);
 }
 
@@ -1380,8 +1490,12 @@
   Comment cmnt(masm_, "[ Throw");
   VisitForStackValue(expr->exception());
   SetExpressionPosition(expr);
-  __ CallRuntime(Runtime::kThrow);
+  CallRuntimeWithOperands(Runtime::kThrow);
   // Never returns here.
+
+  // Even though this expression doesn't produce a value, we need to simulate
+  // plugging of the value context to ensure stack depth tracking is in sync.
+  if (context()->IsStackValue()) OperandStackDepthIncrement(1);
 }
 
 
@@ -1390,17 +1504,14 @@
   entry->range_start = masm()->pc_offset();
   entry->handler_offset = handler->pos();
   entry->try_catch_depth = try_catch_depth_;
+  entry->stack_depth = operand_stack_depth_;
 
-  // Determine expression stack depth of try statement.
-  int stack_depth = info_->scope()->num_stack_slots();  // Include stack locals.
-  for (NestedStatement* current = nesting_stack_; current != NULL; /*nop*/) {
-    current = current->AccumulateDepth(&stack_depth);
-  }
-  entry->stack_depth = stack_depth;
+  // We are using the statically tracked operand stack depth; check it for
+  // accuracy.
+  EmitOperandStackDepthCheck();
 
   // Push context onto operand stack.
   STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-  __ Push(context_register());
+  PushOperand(context_register());
 }
 
 
@@ -1409,7 +1520,7 @@
   entry->range_end = masm()->pc_offset();
 
   // Drop context from operand stack.
-  __ Drop(TryBlockConstant::kElementCount);
+  DropOperands(TryBlockConstant::kElementCount);
 }
 
 
@@ -1420,7 +1531,9 @@
   expr->return_is_recorded_ = false;
 #endif
 
-  Comment cmnt(masm_, "[ Call");
+  Comment cmnt(masm_, (expr->tail_call_mode() == TailCallMode::kAllow)
+                          ? "[ TailCall"
+                          : "[ Call");
   Expression* callee = expr->expression();
   Call::CallType call_type = expr->GetCallType(isolate());
 
@@ -1460,6 +1573,7 @@
     case Call::OTHER_CALL:
       // Call to an arbitrary expression not handled specially above.
       VisitForStackValue(callee);
+      OperandStackDepthIncrement(1);
       __ PushRoot(Heap::kUndefinedValueRootIndex);
       // Emit function call.
       EmitCall(expr);
@@ -1481,8 +1595,7 @@
 }
 
 
-void FullCodeGenerator::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
@@ -1506,13 +1619,49 @@
     // Down to the handler block and also drop context.
     __ Drop(*stack_depth + kElementCount);
   }
-  __ Call(finally_entry_);
-
   *stack_depth = 0;
   *context_length = 0;
   return previous_;
 }
 
+void FullCodeGenerator::DeferredCommands::RecordBreak(Statement* target) {
+  TokenId token = dispenser_.GetBreakContinueToken();
+  commands_.push_back({kBreak, token, target});
+  EmitJumpToFinally(token);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordContinue(Statement* target) {
+  TokenId token = dispenser_.GetBreakContinueToken();
+  commands_.push_back({kContinue, token, target});
+  EmitJumpToFinally(token);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordReturn() {
+  if (return_token_ == TokenDispenserForFinally::kInvalidToken) {
+    return_token_ = TokenDispenserForFinally::kReturnToken;
+    commands_.push_back({kReturn, return_token_, nullptr});
+  }
+  EmitJumpToFinally(return_token_);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordThrow() {
+  if (throw_token_ == TokenDispenserForFinally::kInvalidToken) {
+    throw_token_ = TokenDispenserForFinally::kThrowToken;
+    commands_.push_back({kThrow, throw_token_, nullptr});
+  }
+  EmitJumpToFinally(throw_token_);
+}
+
+void FullCodeGenerator::DeferredCommands::EmitFallThrough() {
+  __ Push(Smi::FromInt(TokenDispenserForFinally::kFallThroughToken));
+  __ Push(result_register());
+}
+
+void FullCodeGenerator::DeferredCommands::EmitJumpToFinally(TokenId token) {
+  __ Push(Smi::FromInt(token));
+  __ Push(result_register());
+  __ jmp(finally_entry_);
+}
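
Note the token discipline visible above: each break/continue target gets a fresh token, while all returns share one reserved token and all throws share another, recorded at most once. A runnable sketch of that dispenser, assuming the reserved tokens are small distinct integers (the real values live in TokenDispenserForFinally, which is not part of this hunk):

    #include <cassert>
    // Assumed layout: a few reserved tokens, then fresh ones counting upward.
    struct TokenDispenser {
      static const int kFallThroughToken = 0;
      static const int kReturnToken = 1;
      static const int kThrowToken = 2;
      int next_token_ = 3;
      int GetBreakContinueToken() { return next_token_++; }  // fresh per target
    };
    int main() {
      TokenDispenser d;
      int a = d.GetBreakContinueToken();
      int b = d.GetBreakContinueToken();
      assert(a != b);  // distinct targets dispatch to distinct blocks
      assert(a != TokenDispenser::kReturnToken && b != TokenDispenser::kThrowToken);
      return 0;
    }
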
 
 bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
   Expression* sub_expr;
@@ -1640,9 +1789,9 @@
     {
       if (needs_block_context_) {
         Comment cmnt(masm(), "[ Extend block context");
-        __ Push(scope->GetScopeInfo(codegen->isolate()));
+        codegen_->PushOperand(scope->GetScopeInfo(codegen->isolate()));
         codegen_->PushFunctionArgumentForContextAllocation();
-        __ CallRuntime(Runtime::kPushBlockContext);
+        codegen_->CallRuntimeWithOperands(Runtime::kPushBlockContext);
 
         // Replace the context stored in the frame.
         codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 52eddaf..6ab0231 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -42,6 +42,7 @@
         nesting_stack_(NULL),
         loop_depth_(0),
         try_catch_depth_(0),
+        operand_stack_depth_(0),
         globals_(NULL),
         context_(NULL),
         bailout_entries_(info->HasDeoptimizationSupport()
@@ -96,9 +97,12 @@
 #error Unsupported target architecture.
 #endif
 
+  static Register result_register();
+
  private:
   class Breakable;
   class Iteration;
+  class TryFinally;
 
   class TestContext;
 
@@ -115,11 +119,13 @@
       codegen_->nesting_stack_ = previous_;
     }
 
-    virtual Breakable* AsBreakable() { return NULL; }
-    virtual Iteration* AsIteration() { return NULL; }
+    virtual Breakable* AsBreakable() { return nullptr; }
+    virtual Iteration* AsIteration() { return nullptr; }
+    virtual TryFinally* AsTryFinally() { return nullptr; }
 
     virtual bool IsContinueTarget(Statement* target) { return false; }
     virtual bool IsBreakTarget(Statement* target) { return false; }
+    virtual bool IsTryFinally() { return false; }
 
     // Notify the statement that we are exiting it via break, continue, or
     // return and give it a chance to generate cleanup code.  Return the
@@ -131,11 +137,6 @@
       return previous_;
     }
 
-    // Like the Exit() method above, but limited to accumulating stack depth.
-    virtual NestedStatement* AccumulateDepth(int* stack_depth) {
-      return previous_;
-    }
-
    protected:
     MacroAssembler* masm() { return codegen_->masm(); }
 
@@ -211,10 +212,43 @@
       *stack_depth += kElementCount;
       return previous_;
     }
-    NestedStatement* AccumulateDepth(int* stack_depth) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
+  };
+
+  class DeferredCommands {
+   public:
+    enum Command { kReturn, kThrow, kBreak, kContinue };
+    typedef int TokenId;
+    struct DeferredCommand {
+      Command command;
+      TokenId token;
+      Statement* target;
+    };
+
+    DeferredCommands(FullCodeGenerator* codegen, Label* finally_entry)
+        : codegen_(codegen),
+          commands_(codegen->zone()),
+          return_token_(TokenDispenserForFinally::kInvalidToken),
+          throw_token_(TokenDispenserForFinally::kInvalidToken),
+          finally_entry_(finally_entry) {}
+
+    void EmitCommands();
+
+    void RecordBreak(Statement* target);
+    void RecordContinue(Statement* target);
+    void RecordReturn();
+    void RecordThrow();
+    void EmitFallThrough();
+
+   private:
+    MacroAssembler* masm() { return codegen_->masm(); }
+    void EmitJumpToFinally(TokenId token);
+
+    FullCodeGenerator* codegen_;
+    ZoneVector<DeferredCommand> commands_;
+    TokenDispenserForFinally dispenser_;
+    TokenId return_token_;
+    TokenId throw_token_;
+    Label* finally_entry_;
   };
 
   // The try block of a try/finally statement.
@@ -222,18 +256,18 @@
    public:
     static const int kElementCount = TryBlockConstant::kElementCount;
 
-    TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
-        : NestedStatement(codegen), finally_entry_(finally_entry) {
-    }
+    TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
+        : NestedStatement(codegen), deferred_commands_(commands) {}
 
     NestedStatement* Exit(int* stack_depth, int* context_length) override;
-    NestedStatement* AccumulateDepth(int* stack_depth) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
+
+    bool IsTryFinally() override { return true; }
+    TryFinally* AsTryFinally() override { return this; }
+
+    DeferredCommands* deferred_commands() { return deferred_commands_; }
 
    private:
-    Label* finally_entry_;
+    DeferredCommands* deferred_commands_;
   };
 
   // The finally block of a try/finally statement.
@@ -247,10 +281,6 @@
       *stack_depth += kElementCount;
       return previous_;
     }
-    NestedStatement* AccumulateDepth(int* stack_depth) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
   };
 
   // The body of a for/in loop.
@@ -266,10 +296,6 @@
       *stack_depth += kElementCount;
       return previous_;
     }
-    NestedStatement* AccumulateDepth(int* stack_depth) override {
-      *stack_depth += kElementCount;
-      return previous_;
-    }
   };
 
 
@@ -354,18 +380,21 @@
   MemOperand VarOperand(Variable* var, Register scratch);
 
   void VisitForEffect(Expression* expr) {
+    if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     EffectContext context(this);
     Visit(expr);
     PrepareForBailout(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
+    if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     AccumulatorValueContext context(this);
     Visit(expr);
     PrepareForBailout(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
+    if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     StackValueContext context(this);
     Visit(expr);
     PrepareForBailout(expr, NO_REGISTERS);
@@ -375,6 +404,7 @@
                        Label* if_true,
                        Label* if_false,
                        Label* fall_through) {
+    if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
     TestContext context(this, expr, if_true, if_false, fall_through);
     Visit(expr);
     // For test contexts, we prepare for bailout before branching, not at
@@ -389,6 +419,34 @@
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
 
+  // Push, pop or drop values onto/from the operand stack.
+  void PushOperand(Register reg);
+  void PopOperand(Register reg);
+  void DropOperands(int count);
+
+  // Convenience helpers for pushing onto the operand stack.
+  void PushOperand(MemOperand operand);
+  void PushOperand(Handle<Object> handle);
+  void PushOperand(Smi* smi);
+
+  // Convenience helpers for pushing/popping multiple operands.
+  void PushOperands(Register reg1, Register reg2);
+  void PushOperands(Register reg1, Register reg2, Register reg3);
+  void PushOperands(Register reg1, Register reg2, Register reg3, Register reg4);
+  void PopOperands(Register reg1, Register reg2);
+
+  // Convenience helper for calling a runtime function that consumes arguments
+  // from the operand stack (only usable for functions with known arity).
+  void CallRuntimeWithOperands(Runtime::FunctionId function_id);
+
+  // Static tracking of the operand stack depth.
+  void OperandStackDepthDecrement(int count);
+  void OperandStackDepthIncrement(int count);
+
+  // Generate debug code that verifies that our static tracking of the operand
+  // stack depth is in sync with the actual operand stack at runtime.
+  void EmitOperandStackDepthCheck();
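
The depth bookkeeping these helpers implement is purely static: every push or pop emitted also adjusts operand_stack_depth_ at code-generation time, so the generator always knows the frame height without inspecting the stack. A toy tracker showing the invariant being checked (this is a model, not V8's class):

    #include <cassert>
    #include <vector>
    // Toy operand stack whose depth is tracked statically alongside it.
    class TrackedStack {
      std::vector<int> stack_;
      int depth_ = 0;  // what the "codegen" believes the depth is
     public:
      void Push(int v) { stack_.push_back(v); ++depth_; }
      int Pop() { --depth_; int v = stack_.back(); stack_.pop_back(); return v; }
      // The analogue of EmitOperandStackDepthCheck().
      void Check() const { assert(depth_ == static_cast<int>(stack_.size())); }
    };
    int main() {
      TrackedStack t;
      t.Push(1); t.Push(2); t.Check();
      t.Pop(); t.Check();
      return 0;
    }
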
+
   // Generate code to create an iterator result object.  The "value" property is
   // set to a value popped from the stack, and "done" is set according to the
   // argument.  The result object is left in the result register.
@@ -455,11 +513,13 @@
 
   // Emit code to pop values from the stack associated with nested statements
   // like try/catch, try/finally, etc, running the finallies and unwinding the
-  // handlers as needed.
-  void EmitUnwindBeforeReturn();
+  // handlers as needed. Also emits the return sequence if necessary (i.e.,
+  // if the return is not delayed by a finally block).
+  void EmitUnwindAndReturn();
 
   // Platform-specific return sequence
   void EmitReturnSequence();
+  void EmitProfilingCounterHandlingForReturnSequence(bool is_tail_call);
 
   // Platform-specific code sequences for calls
   void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
@@ -477,26 +537,18 @@
   F(IsRegExp)                           \
   F(IsJSProxy)                          \
   F(Call)                               \
-  F(ArgumentsLength)                    \
-  F(Arguments)                          \
   F(ValueOf)                            \
-  F(SetValueOf)                         \
-  F(IsDate)                             \
   F(StringCharFromCode)                 \
   F(StringCharAt)                       \
   F(OneByteSeqStringSetChar)            \
   F(TwoByteSeqStringSetChar)            \
-  F(ObjectEquals)                       \
-  F(IsFunction)                         \
   F(IsJSReceiver)                       \
-  F(IsSimdValue)                        \
   F(MathPow)                            \
-  F(IsMinusZero)                        \
   F(HasCachedArrayIndex)                \
   F(GetCachedArrayIndex)                \
   F(GetSuperConstructor)                \
-  F(FastOneByteArrayJoin)               \
   F(GeneratorNext)                      \
+  F(GeneratorReturn)                    \
   F(GeneratorThrow)                     \
   F(DebugBreakInOptimizedCode)          \
   F(ClassOf)                            \
@@ -633,7 +685,7 @@
               TypeFeedbackId id = TypeFeedbackId::None());
 
   // Inside typeof reference errors are never thrown.
-  void CallLoadIC(TypeofMode typeof_mode, LanguageMode language_mode = SLOPPY,
+  void CallLoadIC(TypeofMode typeof_mode,
                   TypeFeedbackId id = TypeFeedbackId::None());
   void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
 
@@ -669,6 +721,9 @@
   void ExitFinallyBlock();
   void ClearPendingMessage();
 
+  void EmitContinue(Statement* target);
+  void EmitBreak(Statement* target);
+
   // Loop nesting counter.
   int loop_depth() { return loop_depth_; }
   void increment_loop_depth() { loop_depth_++; }
@@ -693,7 +748,6 @@
   FunctionLiteral* literal() const { return info_->literal(); }
   Scope* scope() { return scope_; }
 
-  static Register result_register();
   static Register context_register();
 
   // Set fields in the stack frame. Offsets are the frame pointer relative
@@ -945,6 +999,7 @@
   NestedStatement* nesting_stack_;
   int loop_depth_;
   int try_catch_depth_;
+  int operand_stack_depth_;
   ZoneList<Handle<Object> >* globals_;
   Handle<FixedArray> modules_;
   int module_index_;
@@ -954,7 +1009,6 @@
   ZoneVector<HandlerTableEntry> handler_table_;
   int ic_total_count_;
   Handle<Cell> profiling_counter_;
-  bool generate_debug_code_;
 
   friend class NestedStatement;
 
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index 4ef3a09..fadcd7c 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -17,8 +17,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 class JumpPatchSite BASE_EMBEDDED {
  public:
@@ -68,6 +67,7 @@
     __ j(cc, target, distance);
   }
 
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
@@ -99,13 +99,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
     __ mov(ecx, Operand(esp, receiver_offset));
@@ -126,6 +119,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
     } else if (locals_count > 1) {
@@ -259,48 +253,33 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ mov(RestParamAccessDescriptor::parameter_count(),
-           Immediate(Smi::FromInt(num_parameters)));
-    __ lea(RestParamAccessDescriptor::parameter_pointer(),
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-    __ mov(RestParamAccessDescriptor::rest_parameter_index(),
-           Immediate(Smi::FromInt(rest_index)));
-    function_in_register = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register) {
+      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
+    function_in_register = false;
     SetVar(rest_param, eax, ebx, edx);
   }
 
   Variable* arguments = scope()->arguments();
   if (arguments != NULL) {
-    // Function uses arguments object.
+    // Arguments object must be allocated after the context object, in
+    // case the "arguments" or ".arguments" variables are in the context.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register) {
       __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
-           Immediate(Smi::FromInt(num_parameters)));
-    __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(edi);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, eax, ebx, edx);
   }
@@ -401,6 +380,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ j(positive, &ok, Label::kNear);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(eax);
+  }
+  __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(eax);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
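
The weight heuristic above decays the profiling counter faster for larger functions: unless the function should self-optimize, the weight scales with the code distance, clamped to [1, kMaxBackEdgeWeight]. A small worked computation with illustrative constants (not necessarily ia32's actual values):

    #include <algorithm>
    #include <cassert>
    int main() {
      const int kMaxBackEdgeWeight = 127;   // illustrative
      const int kCodeSizeMultiplier = 100;  // illustrative
      int distance = 2500;                  // masm()->pc_offset() at the return
      int weight = std::min(kMaxBackEdgeWeight,
                            std::max(1, distance / kCodeSizeMultiplier));
      assert(weight == 25);  // bigger code bodies burn the counter faster
      return 0;
    }
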
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -413,24 +416,7 @@
       __ push(eax);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ j(positive, &ok, Label::kNear);
-    __ push(eax);
-    __ call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(eax);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     SetReturnPosition(literal());
     __ leave();
@@ -446,7 +432,7 @@
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
-  __ push(operand);
+  codegen()->PushOperand(operand);
 }
 
 
@@ -487,6 +473,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  codegen()->OperandStackDepthIncrement(1);
   if (lit->IsSmi()) {
     __ SafePush(Immediate(lit));
   } else {
@@ -500,7 +487,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -525,41 +512,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ mov(Operand(esp, 0), reg);
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -583,6 +543,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
+  codegen()->OperandStackDepthIncrement(1);
   Label done;
   __ bind(materialize_true);
   __ push(Immediate(isolate()->factory()->true_value()));
@@ -609,6 +570,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  codegen()->OperandStackDepthIncrement(1);
   Handle<Object> value = flag
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
@@ -731,7 +693,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
     __ cmp(ebx, isolate()->factory()->with_context_map());
@@ -846,11 +808,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ push(Immediate(variable->name()));
+      PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      __ push(
-          Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -923,8 +884,8 @@
     }
 
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -946,7 +907,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_label());
   } else {
@@ -977,22 +938,21 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  __ j(equal, &exit);
-  __ cmp(eax, isolate()->factory()->null_value());
-  __ j(equal, &exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop; otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(eax, &convert, Label::kNear);
   __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
   __ j(above_equal, &done_convert, Label::kNear);
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  __ j(equal, &exit);
+  __ cmp(eax, isolate()->factory()->null_value());
+  __ j(equal, &exit);
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
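
The reordering matters: a value that is already a JS receiver now takes the early exit at the top, and the null/undefined checks only run on the slow path toward conversion. A compact model of the new control flow (illustrative kinds, not V8's type tests):

    // Model of the reordered for-in prologue.
    enum Kind { kReceiver, kNullOrUndefined, kOtherPrimitive };
    // Returns true if the loop should be skipped entirely.
    bool SkipLoop(Kind k, bool* needs_to_object) {
      *needs_to_object = false;
      if (k == kReceiver) return false;        // fast path: no further checks
      if (k == kNullOrUndefined) return true;  // ES5 12.6.4: skip the loop
      *needs_to_object = true;                 // smis/primitives go to ToObject
      return false;
    }
    int main() {
      bool conv;
      return (SkipLoop(kNullOrUndefined, &conv) && !SkipLoop(kReceiver, &conv)) ? 0 : 1;
    }
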
@@ -1000,15 +960,13 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(eax);
 
-  // Check for proxies.
-  Label call_runtime, use_cache, fixed_array;
-  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
-  __ j(equal, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime, use_cache, fixed_array;
   __ CheckEnumCache(&call_runtime);
 
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1017,7 +975,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(eax);
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
@@ -1051,14 +1009,15 @@
   __ bind(&fixed_array);
 
   // No need for a write barrier, we are storing a Smi in the feedback vector.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(ebx);
-  int vector_index = SmiFromSlot(slot)->value();
   __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ push(Immediate(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1084,6 +1043,16 @@
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and we only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. Record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function without rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(edx);
+  __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
+         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
@@ -1121,6 +1090,7 @@
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
   __ add(esp, Immediate(5 * kPointerSize));
+  OperandStackDepthDecrement(ForIn::kElementCount);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1363,12 +1333,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ push(esi);  // Context.
       __ push(Immediate(var->name()));
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(eax);
@@ -1393,7 +1362,7 @@
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
-    __ push(Immediate(isolate()->factory()->null_value()));
+    PushOperand(isolate()->factory()->null_value());
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1443,7 +1412,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(eax);  // Save result on the stack
+      PushOperand(eax);  // Save result on the stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1472,24 +1441,24 @@
           }
           break;
         }
-        __ push(Operand(esp, 0));  // Duplicate receiver.
+        PushOperand(Operand(esp, 0));  // Duplicate receiver.
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
           if (NeedsHomeObject(value)) {
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
-          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Language mode
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(Smi::FromInt(SLOPPY));  // Language mode
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
-        __ push(Operand(esp, 0));  // Duplicate receiver.
+        PushOperand(Operand(esp, 0));  // Duplicate receiver.
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1511,14 +1480,14 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end();
        ++it) {
-    __ push(Operand(esp, 0));  // Duplicate receiver.
+    PushOperand(Operand(esp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
 
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
 
-    __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(Smi::FromInt(NONE));
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1535,17 +1504,17 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(eax);  // Save result on the stack
+      PushOperand(eax);  // Save result on the stack
       result_saved = true;
     }
 
-    __ push(Operand(esp, 0));  // Duplicate receiver.
+    PushOperand(Operand(esp, 0));  // Duplicate receiver.
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1560,10 +1529,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ push(Immediate(Smi::FromInt(NONE)));
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1572,13 +1542,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ push(Immediate(Smi::FromInt(NONE)));
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ push(Immediate(Smi::FromInt(NONE)));
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1636,14 +1606,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(eax);  // array literal.
+      PushOperand(eax);  // array literal.
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1664,21 +1634,16 @@
   // (inclusive) and these elements gets appended to the array. Note that the
   // number elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(eax);
+    PopOperand(eax);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(eax);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(eax);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1710,10 +1675,10 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
+        PushOperand(MemOperand(esp, kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case NAMED_PROPERTY:
@@ -1731,11 +1696,11 @@
       VisitForStackValue(
           property->obj()->AsSuperPropertyReference()->home_object());
       VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case KEYED_PROPERTY: {
@@ -1782,7 +1747,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(eax);  // Left operand goes on the stack.
+    PushOperand(eax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     if (ShouldInlineSmiCase(op)) {
@@ -1847,8 +1812,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ pop(ebx);
+      __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+      __ j(not_equal, &resume);
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
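
The continuation now receives the resume mode on top of the stack (pushed by EmitGeneratorResume further down): NEXT and THROW fall through to the ordinary resume path, while RETURN boxes the input value into a done iterator result and unwinds. A standalone model of that dispatch, with simplified stand-in types:

    // Simplified model of the continuation dispatch; these are not V8's types.
    enum ResumeMode { NEXT, RETURN, THROW };
    struct IteratorResult { int value; bool done; };
    // RETURN boxes the input into a done result and unwinds; NEXT and THROW
    // instead take the jump to the &resume label.
    bool Continuation(ResumeMode mode, int input, IteratorResult* out) {
      if (mode != RETURN) return false;    // __ j(not_equal, &resume)
      *out = IteratorResult{input, true};  // EmitCreateIteratorResult(true)
      return true;                         // EmitUnwindAndReturn()
    }
    int main() {
      IteratorResult r = {0, false};
      return (Continuation(RETURN, 42, &r) && r.done) ? 0 : 1;
    }
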
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1867,7 +1840,7 @@
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1876,126 +1849,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ mov(FieldOperand(result_register(),
-                          JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ mov(eax, isolate()->factory()->undefined_value());
-      __ jmp(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ mov(load_name, isolate()->factory()->throw_string());  // "throw"
-      __ push(load_name);                                       // "throw"
-      __ push(Operand(esp, 2 * kPointerSize));                  // iter
-      __ push(eax);                                             // exception
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(eax);                                       // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(eax);                                      // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ mov(eax, Operand(esp, generator_object_depth));
-      __ push(eax);                                      // g
-      __ push(Immediate(Smi::FromInt(handler_index)));   // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(l_continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ pop(eax);                                       // result
-      EmitReturnSequence();
-      __ bind(&l_resume);                                // received in eax
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = iter.next; arg = received;
-      __ bind(&l_next);
-
-      __ mov(load_name, isolate()->factory()->next_string());
-      __ push(load_name);                           // "next"
-      __ push(Operand(esp, 2 * kPointerSize));      // iter
-      __ push(eax);                                 // received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ mov(load_receiver, Operand(esp, kPointerSize));
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(edi, eax);
-      __ mov(Operand(esp, 2 * kPointerSize), edi);
-      SetCallPosition(expr);
-      __ Set(eax, 1);
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ push(eax);                                      // save result
-      __ Move(load_receiver, eax);                       // result
-      __ mov(load_name,
-             isolate()->factory()->done_string());       // "done"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // result.done in eax
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ j(not_equal, &l_try);
-
-      // result.value
-      __ pop(load_receiver);                              // result
-      __ mov(load_name,
-             isolate()->factory()->value_string());       // "value"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                      // result.value in eax
-      context()->DropAndPlug(2, eax);                     // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
@@ -2009,7 +1871,13 @@
   // ebx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(ebx);
+  PopOperand(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
+  __ mov(ecx, result_register());
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
+                      kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2059,6 +1927,7 @@
     __ add(edx, ecx);
     __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
            Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ jmp(edx);
     __ bind(&slow_resume);
   }
@@ -2072,6 +1941,7 @@
   __ push(ecx);
   __ jmp(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   __ push(ebx);
   __ push(result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2083,6 +1953,21 @@
   context()->Plug(result_register());
 }
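
Both resume paths above now push Smi::FromInt(resume_mode) for the generator's continuation to consume: the fast path just before jumping to the resume address, the slow path just before the runtime call. A minimal standalone sketch of that protocol; the enum values are assumed here, not taken from JSGeneratorObject:

    #include <cstdio>

    // Assumed numbering for illustration only.
    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

    // The continuation pops the Smi token and dispatches on it.
    static const char* Dispatch(ResumeMode mode) {
      switch (mode) {
        case kNext:   return "deliver the value to the yield expression";
        case kReturn: return "unwind through finallies and return the value";
        case kThrow:  return "throw the value at the yield point";
      }
      return "unreachable";
    }

    int main() { std::puts(Dispatch(kNext)); }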
 
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+  OperandStackDepthIncrement(1);
+  __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ mov(eax, ebp);
+    __ sub(eax, esp);
+    __ cmp(eax, Immediate(expected_diff));
+    __ Assert(equal, kUnexpectedStackDepth);
+  }
+}
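
PushOperand/PopOperand and the other *Operand helpers thread an operand_stack_depth_ counter alongside every emitted push and pop, and EmitOperandStackDepthCheck asserts under --debug-code that ebp - esp equals the fixed frame size plus the tracked slots. A standalone model of that invariant, with the ia32 constants assumed:

    #include <cassert>
    #include <cstdio>

    constexpr int kPointerSize = 4;                          // ia32, assumed
    constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed value

    class OperandStack {
     public:
      void Push() { ++depth_; }
      void Pop(int count = 1) { depth_ -= count; assert(depth_ >= 0); }
      // Mirrors EmitOperandStackDepthCheck: the fp-to-sp distance must equal
      // the fixed frame slots plus every operand the tracker believes is live.
      bool CheckAgainstFrame(int fp_minus_sp) const {
        return fp_minus_sp == kFixedFrameSizeFromFp + depth_ * kPointerSize;
      }

     private:
      int depth_ = 0;
    };

    int main() {
      OperandStack s;
      s.Push();  // e.g. PushOperand(eax)
      s.Push();  // e.g. PushOperand(Operand(esp, 0))
      s.Pop();   // e.g. PopOperand(edx)
      assert(s.CheckAgainstFrame(kFixedFrameSizeFromFp + 1 * kPointerSize));
      std::puts("depth check ok");
    }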
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2118,37 +2003,7 @@
   __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
   __ mov(LoadDescriptor::SlotRegister(),
          Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ push(Immediate(key->value()));
-  __ push(Immediate(Smi::FromInt(language_mode())));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ mov(LoadDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ push(Immediate(Smi::FromInt(language_mode())));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2159,7 +2014,7 @@
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
-  __ pop(edx);
+  PopOperand(edx);
   __ mov(ecx, eax);
   __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
@@ -2167,8 +2022,7 @@
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
@@ -2248,24 +2102,14 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in eax.
-  DCHECK(lit != NULL);
-  __ push(eax);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = ebx;
-  __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
-  __ Push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
-      __ push(Operand(esp, kPointerSize));  // constructor
+      PushOperand(Operand(esp, kPointerSize));  // constructor
     } else {
-      __ push(Operand(esp, 0));  // prototype
+      PushOperand(Operand(esp, 0));  // prototype
     }
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
@@ -2289,31 +2133,28 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ push(Immediate(Smi::FromInt(DONT_ENUM)));
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ push(Immediate(Smi::FromInt(DONT_ENUM)));
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
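
The COMPUTED case now routes through Runtime::kDefineDataPropertyInLiteral with two extra Smi flags: the DONT_ENUM attributes and whether the value still needs its function name derived from the key. A hedged sketch of what the NeedsSetFunctionName flag buys; the names below are invented for illustration:

    #include <cassert>
    #include <string>

    struct ComputedProperty {
      std::string key;
      bool needs_set_function_name;  // the second Smi pushed above
    };

    // When the value is an anonymous function, the runtime names it after the
    // computed key, so class C { ["m"]() {} } yields a method named "m".
    std::string EffectiveFunctionName(const ComputedProperty& p,
                                      const std::string& own_name) {
      return (p.needs_set_function_name && own_name.empty()) ? p.key : own_name;
    }

    int main() { assert(EffectiveFunctionName({"m", true}, "") == "m"); }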
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ pop(edx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(edx);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2336,10 +2177,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(eax);  // Preserve value.
+      PushOperand(eax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), eax);
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ mov(StoreDescriptor::NameRegister(),
              prop->key()->AsLiteral()->value());
       EmitLoadStoreICSlot(slot);
@@ -2347,7 +2188,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ push(eax);
+      PushOperand(eax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2364,7 +2205,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ push(eax);
+      PushOperand(eax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2384,12 +2225,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(eax);  // Preserve value.
+      PushOperand(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), eax);
-      __ pop(StoreDescriptor::ReceiverRegister());  // Receiver.
-      __ pop(StoreDescriptor::ValueRegister());     // Restore value.
+      PopOperand(StoreDescriptor::ReceiverRegister());  // Receiver.
+      PopOperand(StoreDescriptor::ValueRegister());     // Restore value.
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2469,17 +2310,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ push(eax);  // Value.
-      __ push(esi);  // Context.
-      __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(language_mode())));
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(Immediate(var->name()));
+      __ Push(eax);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, ecx);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ mov(edx, location);
         __ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2526,7 +2367,7 @@
   DCHECK(prop->key()->IsLiteral());
 
   __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2542,10 +2383,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ push(Immediate(key->value()));
-  __ push(eax);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(eax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2554,10 +2396,10 @@
   // eax : value
   // stack : receiver ('this'), home_object, key
 
-  __ push(eax);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(eax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
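
CallRuntimeWithOperands pairs the runtime call with the operand-stack bookkeeping: unlike a bare __ CallRuntime, it also debits the tracked depth by the callee's argument count, since the runtime call pops those slots. A sketch of the pairing, with the structure assumed for illustration:

    // Assumed shape; the real methods live on FullCodeGenerator and emit
    // machine code rather than mutating an int.
    struct OperandTracker {
      int depth = 0;
      void PushOperand() { ++depth; }               // emits a push, depth++
      void CallRuntime(int nargs) { (void)nargs; }  // plain call, no tracking
      void CallRuntimeWithOperands(int nargs) {     // call, then depth -= nargs
        CallRuntime(nargs);
        depth -= nargs;
      }
    };

    int main() {
      OperandTracker t;
      t.PushOperand();               // value
      t.CallRuntimeWithOperands(1);  // e.g. kStoreKeyedToSuper_Sloppy
      return t.depth;                // back to 0
    }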
 
 
@@ -2567,8 +2409,8 @@
   // esp[0]            : key
   // esp[kPointerSize] : receiver
 
-  __ pop(StoreDescriptor::NameRegister());  // Key.
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::NameRegister());  // Key.
+  PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(eax));
   Handle<Code> ic =
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2600,7 +2442,7 @@
     if (!expr->IsSuperAccess()) {
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
-      __ pop(LoadDescriptor::ReceiverRegister());                  // Object.
+      PopOperand(LoadDescriptor::ReceiverRegister());              // Object.
       __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
       EmitKeyedPropertyLoad(expr);
     } else {
@@ -2636,7 +2478,7 @@
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    PushOperand(isolate()->factory()->undefined_value());
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2646,7 +2488,7 @@
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
+    PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2668,19 +2510,17 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
-  __ push(Immediate(key->value()));
-  __ push(Immediate(Smi::FromInt(language_mode())));
+  PushOperand(eax);
+  PushOperand(eax);
+  PushOperand(Operand(esp, kPointerSize * 2));
+  PushOperand(key->value());
   // Stack here:
   //  - home_object
   //  - this (receiver)
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2708,7 +2548,7 @@
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
   // Push the target function under the receiver.
-  __ push(Operand(esp, 0));
+  PushOperand(Operand(esp, 0));
   __ mov(Operand(esp, kPointerSize), eax);
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2726,19 +2566,17 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
+  PushOperand(eax);
+  PushOperand(eax);
+  PushOperand(Operand(esp, kPointerSize * 2));
   VisitForStackValue(prop->key());
-  __ push(Immediate(Smi::FromInt(language_mode())));
   // Stack here:
   //  - home_object
   //  - this (receiver)
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2760,12 +2598,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
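
For TailCallMode::kAllow the profiling counters are settled before the call, because the frame is replaced and the normal return sequence in this function never runs; afterwards OperandStackDepthDecrement retires the receiver plus arguments from the tracked stack. A small model of that accounting, shape assumed:

    // Assumed shape: a call consumes the receiver and arg_count arguments and
    // leaves its result in the accumulator, not on the stack, so the tracked
    // depth drops by arg_count + 1.
    struct CallAccounting {
      int depth = 0;
      void EmitCall(int arg_count, bool tail_call) {
        if (tail_call) {
          // Counters must be flushed here; no return sequence remains in this
          // frame to do it later.
        }
        depth -= arg_count + 1;
      }
    };

    int main() {
      CallAccounting a;
      a.depth = 3;  // receiver + two arguments pushed earlier
      a.EmitCall(2, false);
      return a.depth;  // 0
    }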
 
@@ -2811,11 +2660,10 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in eax) and
     // the object holding it (returned in edx).
-    __ push(context_register());
-    __ push(Immediate(callee->name()));
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ push(eax);  // Function.
-    __ push(edx);  // Receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperand(eax);  // Function.
+    PushOperand(edx);  // Receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
@@ -2834,7 +2682,7 @@
   } else {
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    PushOperand(isolate()->factory()->undefined_value());
   }
 }
 
@@ -2866,7 +2714,10 @@
   SetCallPosition(expr);
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   __ Set(eax, arg_count);
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2907,6 +2758,7 @@
 
   CallConstructStub stub(isolate());
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2925,7 +2777,7 @@
   __ AssertFunction(result_register());
   __ mov(result_register(),
          FieldOperand(result_register(), HeapObject::kMapOffset));
-  __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2947,6 +2799,7 @@
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -2999,77 +2852,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(above_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-  __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
-  // Check if the exponent half is 0x80000000. Comparing against 1 and
-  // checking for overflow is the shortest possible encoding.
-  __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
-  __ j(no_overflow, if_false);
-  __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3158,68 +2940,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(ebx);
-  __ cmp(eax, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(edx, eax);
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  Label exit;
-  // Get the number of formal parameters.
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  __ AssertSmi(eax);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3288,28 +3008,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3322,8 +3020,8 @@
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
 
-  __ pop(value);
-  __ pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ test(value, Immediate(kSmiTagMask));
@@ -3357,8 +3055,8 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ pop(value);
-  __ pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ test(value, Immediate(kSmiTagMask));
@@ -3379,35 +3077,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(ebx);  // eax = value. ebx = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(ebx, &done, Label::kNear);
-
-  // If the object is not a value type, return the value.
-  __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
-  __ j(not_equal, &done, Label::kNear);
-
-  // Store the value.
-  __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3425,27 +3094,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into eax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to a name.
-  Label convert, done_convert;
-  __ JumpIfSmi(eax, &convert, Label::kNear);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
-  __ j(below_equal, &done_convert, Label::kNear);
-  __ bind(&convert);
-  __ Push(eax);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3476,7 +3124,7 @@
   Register index = eax;
   Register result = edx;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3523,7 +3171,7 @@
   Register scratch = edx;
   Register result = eax;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3573,6 +3221,7 @@
   // Call the target.
   __ mov(eax, Immediate(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3629,275 +3278,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  VisitForStackValue(args->at(1));
-  // Load this to eax (= array)
-  VisitForAccumulatorValue(args->at(0));
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = eax;
-  Register elements = no_reg;  // Will be eax.
-
-  Register index = edx;
-
-  Register string_length = ecx;
-
-  Register string = esi;
-
-  Register scratch = ebx;
-
-  Register array_length = edi;
-  Register result_pos = no_reg;  // Will be edi.
-
-  // Separator operand is already pushed.
-  Operand separator_operand = Operand(esp, 2 * kPointerSize);
-  Operand result_operand = Operand(esp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(esp, 0);
-  __ sub(esp, Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray
-  __ JumpIfSmi(array, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ j(not_zero, &non_trivial_array);
-  __ mov(result_operand, isolate()->factory()->empty_string());
-  __ jmp(&done);
-
-  // Save the array length.
-  __ bind(&non_trivial_array);
-  __ mov(array_length_operand, array_length);
-
-  // Save the FixedArray containing array's elements.
-  // End of array's live range.
-  elements = array;
-  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ Move(index, Immediate(0));
-  __ Move(string_length, Immediate(0));
-  // Loop condition: while (index < length).
-  // Live loop registers: index, array_length, string,
-  //                      scratch, string_length, elements.
-  if (generate_debug_code_) {
-    __ cmp(index, array_length);
-    __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ mov(string, FieldOperand(elements,
-                              index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  __ add(string_length,
-         FieldOperand(string, SeqOneByteString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ add(index, Immediate(1));
-  __ cmp(index, array_length);
-  __ j(less, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, 1);
-  __ j(not_equal, &not_size_one_array);
-  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ mov(result_operand, scratch);
-  __ jmp(&done);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths, as a smi.
-  // elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ mov(string, separator_operand);
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Add (separator length times array_length) - separator length
-  // to string_length.
-  __ mov(scratch, separator_operand);
-  __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, scratch);  // May be negative, temporarily.
-  __ imul(scratch, array_length_operand);
-  __ j(overflow, &bailout);
-  __ add(string_length, scratch);
-  __ j(overflow, &bailout);
-
-  __ shr(string_length, 1);
-
-  // Bailout for large object allocations.
-  __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
-  __ j(greater, &bailout);
-
-  // Live registers and stack values:
-  //   string_length
-  //   elements
-  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
-                           &bailout);
-  __ mov(result_operand, result_pos);
-  __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
-  __ mov(string, separator_operand);
-  __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-
-  // Empty separator case
-  __ mov(index, Immediate(0));
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-  __ bind(&loop_1_condition);
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_1);  // End while (index < length).
-  __ jmp(&done);
-
-
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ mov_b(separator_operand, scratch);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator character to the result.
-  __ mov_b(scratch, separator_operand);
-  __ mov_b(Operand(result_pos, 0), scratch);
-  __ inc(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator to the result.
-  __ mov(string, separator_operand);
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_3);  // End while (index < length).
-  __ jmp(&done);
-
-
-  __ bind(&bailout);
-  __ mov(result_operand, isolate()->factory()->undefined_value());
-  __ bind(&done);
-  __ mov(eax, result_operand);
-  // Drop temp values from the stack, and restore context register.
-  __ add(esp, Immediate(3 * kPointerSize));
-
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3930,7 +3310,7 @@
   __ jmp(&done, Label::kNear);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3939,7 +3319,7 @@
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as receiver.
-  __ push(Immediate(isolate()->factory()->undefined_value()));
+  PushOperand(isolate()->factory()->undefined_value());
 
   __ LoadGlobalFunction(expr->context_index(), eax);
 }
@@ -3954,6 +3334,7 @@
   __ Set(eax, arg_count);
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -3966,7 +3347,7 @@
     EmitLoadJSRuntimeFunction(expr);
 
     // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
+    PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
 
     // Push the arguments ("left-to-right").
@@ -4001,6 +3382,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(eax);
       }
     }
@@ -4018,9 +3400,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4042,8 +3424,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          __ push(context_register());
-          __ push(Immediate(var->name()));
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(eax);
         }
@@ -4088,6 +3469,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
@@ -4143,7 +3525,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ push(Immediate(Smi::FromInt(0)));
+      PushOperand(Smi::FromInt(0));
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4158,9 +3540,9 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ push(result_register());
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(esp, kPointerSize));
+        PushOperand(result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4170,10 +3552,10 @@
         VisitForStackValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
         VisitForAccumulatorValue(prop->key());
-        __ push(result_register());
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4263,7 +3645,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(eax);
+          PushOperand(eax);
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -4287,8 +3669,8 @@
   __ bind(&stub_call);
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
-                                              strength(language_mode())).code();
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4323,7 +3705,7 @@
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::NameRegister(),
              prop->key()->AsLiteral()->value());
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4359,8 +3741,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ pop(StoreDescriptor::NameRegister());
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::NameRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4415,8 +3797,8 @@
     __ cmp(eax, isolate()->factory()->false_value());
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ cmp(eax, isolate()->factory()->undefined_value());
-    __ j(equal, if_true);
+    __ cmp(eax, isolate()->factory()->null_value());
+    __ j(equal, if_false);
     __ JumpIfSmi(eax, if_false);
     // Check for undetectable objects => true.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
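
The rewritten check encodes a subtle ordering: null is rejected first (typeof null is "object"), smis next, and only then is the map's undetectable bit consulted, which in this revision appears to cover undefined itself as well as undetectable host objects. A truth-table sketch of the assumed semantics:

    #include <cassert>

    // Assumed semantics of the emitted compare/branch sequence above.
    bool TypeofIsUndefined(bool is_null, bool is_smi, bool map_undetectable) {
      if (is_null) return false;   // typeof null == "object"
      if (is_smi) return false;    // typeof smi  == "number"
      return map_undetectable;     // undefined and document.all-style objects
    }

    int main() {
      assert(!TypeofIsUndefined(true, false, true));   // null
      assert(TypeofIsUndefined(false, false, true));   // undefined
      assert(!TypeofIsUndefined(false, true, false));  // smi
    }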
@@ -4481,7 +3863,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -4489,7 +3871,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ Pop(edx);
+      PopOperand(edx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4501,7 +3883,7 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = CompareIC::ComputeCondition(op);
-      __ pop(edx);
+      PopOperand(edx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4515,8 +3897,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4596,15 +3977,15 @@
     // as their closure, not the anonymous closure containing the global
     // code.
     __ mov(eax, NativeContextOperand());
-    __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(eax, Context::CLOSURE_INDEX));
   } else if (closure_scope->is_eval_scope()) {
     // Contexts nested inside eval code have the same closure as the context
     // calling eval, not the anonymous closure containing the eval code.
     // Fetch it from the context.
-    __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(esi, Context::CLOSURE_INDEX));
   } else {
     DCHECK(closure_scope->is_function_scope());
-    __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    PushOperand(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
 
@@ -4613,23 +3994,11 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  // Cook return address on top of stack (smi encoded Code* delta)
-  DCHECK(!result_register().is(edx));
-  __ pop(edx);
-  __ sub(edx, Immediate(masm_->CodeObject()));
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ SmiTag(edx);
-  __ push(edx);
-
-  // Store result register while executing finally block.
-  __ push(result_register());
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(edx, Operand::StaticVariable(pending_message_obj));
-  __ push(edx);
+  PushOperand(edx);
 
   ClearPendingMessage();
 }
@@ -4638,19 +4007,10 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(edx));
   // Restore pending message from stack.
-  __ pop(edx);
+  PopOperand(edx);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(Operand::StaticVariable(pending_message_obj), edx);
-
-  // Restore result register from stack.
-  __ pop(result_register());
-
-  // Uncook return address.
-  __ pop(edx);
-  __ SmiUntag(edx);
-  __ add(edx, Immediate(masm_->CodeObject()));
-  __ jmp(edx);
 }
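
EnterFinallyBlock/ExitFinallyBlock no longer cook the return address; the only state carried across a finally block now is the pending message, with control flow resuming through the deferred-command tokens instead. For reference, the deleted cooking reconstructed as plain C++: the return address was rebased against the code object and smi-tagged so the GC could move the code while the finally block ran.

    #include <cassert>
    #include <cstdint>

    constexpr unsigned kSmiTagSize = 1;  // ia32: kSmiTagSize + kSmiShiftSize == 1

    std::uintptr_t Cook(std::uintptr_t ret, std::uintptr_t code) {
      return (ret - code) << kSmiTagSize;  // sub CodeObject, then SmiTag
    }

    std::uintptr_t Uncook(std::uintptr_t cooked, std::uintptr_t code) {
      return (cooked >> kSmiTagSize) + code;  // SmiUntag, then add CodeObject
    }

    int main() {
      std::uintptr_t code = 0x1000, ret = 0x1234;
      assert(Uncook(Cook(ret, code), code) == ret);
    }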
 
 
@@ -4669,6 +4029,32 @@
          Immediate(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(edx));
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(edx);                // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
+    __ j(not_equal, &skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
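
EmitCommands is the replacement for the cooked-address scheme: every path that leaves a finally block pushes the accumulator and a token Smi, and the generated epilogue is a chain of compare-and-skip blocks, one per deferred command. A standalone sketch of that dispatch; the names are invented for illustration:

    #include <cstdio>
    #include <vector>

    enum Command { kReturn, kThrow, kContinue, kBreak };

    struct DeferredCommand {
      Command command;
      int token;  // the Smi pushed before jumping into the finally block
    };

    // One compare-and-skip block per command, like the cmp/j(not_equal, &skip)
    // pairs emitted above.
    void EmitCommands(const std::vector<DeferredCommand>& commands, int token) {
      for (const DeferredCommand& cmd : commands) {
        if (token != cmd.token) continue;  // __ j(not_equal, &skip)
        switch (cmd.command) {
          case kReturn:   std::puts("unwind and return");    break;
          case kThrow:    std::puts("re-throw accumulator"); break;
          case kContinue: std::puts("jump to loop header");  break;
          case kBreak:    std::puts("jump past loop");       break;
        }
      }
    }

    int main() { EmitCommands({{kReturn, 1}, {kBreak, 2}}, 2); }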
 
 #undef __
 
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index 07e9fdf..c8ce204 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -27,8 +27,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
@@ -86,6 +85,7 @@
   }
 
  private:
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
@@ -118,13 +118,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop-at");
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
     __ lw(a2, MemOperand(sp, receiver_offset));
@@ -146,6 +139,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -274,22 +268,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ li(RestParamAccessDescriptor::parameter_count(),
-          Operand(Smi::FromInt(num_parameters)));
-    __ Addu(RestParamAccessDescriptor::parameter_pointer(), fp,
-            Operand(StandardFrameConstants::kCallerSPOffset + offset));
-    __ li(RestParamAccessDescriptor::rest_parameter_index(),
-          Operand(Smi::FromInt(rest_index)));
-    DCHECK(a1.is(RestParamAccessDescriptor::rest_parameter_index()));
-    function_in_register_a1 = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register_a1) {
+      __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register_a1 = false;
     SetVar(rest_param, v0, a1, a2);
   }
 
@@ -297,28 +281,20 @@
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register_a1) {
       // Load this again, if it's used by the local context below.
       __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ li(ArgumentsAccessNewDescriptor::parameter_count(),
-          Operand(Smi::FromInt(num_parameters)));
-    __ Addu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
-            Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(a1);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, v0, a1, a2);
   }
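
ArgumentsAccessStub is gone; the prologue now picks between three specialized paths. A sketch of the selection logic mirroring the branches above, with the enum invented for illustration:

    #include <cstdio>

    enum class ArgumentsPath { kStrict, kSloppyGeneric, kSloppyFast };

    // Strict functions and functions without simple parameters get unmapped
    // (strict) arguments; duplicate parameters force the generic runtime path;
    // everything else takes the fast sloppy stub.
    ArgumentsPath Choose(bool strict_mode, bool simple_params, bool dup_params) {
      if (strict_mode || !simple_params) return ArgumentsPath::kStrict;
      if (dup_params) return ArgumentsPath::kSloppyGeneric;
      return ArgumentsPath::kSloppyFast;
    }

    int main() {
      std::printf("%d\n", static_cast<int>(Choose(false, true, false)));
    }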
@@ -431,6 +407,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ Branch(&ok, ge, a3, Operand(zero_reg));
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(v0);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(v0);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
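
The extracted helper computes the interrupt weight exactly as the old inline return sequence did, only skipping the result-register save when the frame is about to be replaced by a tail call. Standalone, with the budget flags as assumed defaults:

    #include <algorithm>
    #include <cstdio>

    int Weight(bool should_self_optimize, int pc_offset) {
      const int kInterruptBudget = 6144;    // assumed --interrupt-budget default
      const int kSelfOptCount = 4;          // assumed --self-opt-count default
      const int kMaxBackEdgeWeight = 127;
      const int kCodeSizeMultiplier = 147;  // per-port constant, assumed
      if (should_self_optimize) return kInterruptBudget / kSelfOptCount;
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, pc_offset / kCodeSizeMultiplier));
    }

    int main() { std::printf("%d\n", Weight(false, 1200)); }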
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -444,24 +444,7 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ Branch(&ok, ge, a3, Operand(zero_reg));
-    __ push(v0);
-    __ Call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(v0);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     // Make sure that the constant pool is not emitted inside of the return
     // sequence.
@@ -483,7 +466,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -500,7 +483,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
   __ LoadRoot(result_register(), index);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -535,7 +518,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
   // Immediates cannot be pushed directly.
   __ li(result_register(), Operand(lit));
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -544,7 +527,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -569,41 +552,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ sw(reg, MemOperand(sp, 0));
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -627,6 +583,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
+  codegen()->OperandStackDepthIncrement(1);
   Label done;
   __ bind(materialize_true);
   __ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -658,7 +615,7 @@
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ LoadRoot(at, value_root_index);
-  __ push(at);
+  codegen()->PushOperand(at);
 }
 
 
@@ -783,7 +740,7 @@
   // The variable in the declaration always resides in the current function
   // context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
@@ -905,11 +862,11 @@
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
       __ li(a2, Operand(variable->name()));
-      __ Push(a2);
+      PushOperand(a2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -984,8 +941,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1006,7 +963,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ Branch(nested_statement.break_label());
   } else {
@@ -1037,23 +994,23 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&exit, eq, a0, Operand(at));
-  Register null_value = t1;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ Branch(&exit, eq, a0, Operand(null_value));
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-  __ mov(a0, v0);
-  // Convert the object to a JS object.
+  __ mov(a0, result_register());
+  OperandStackDepthIncrement(ForIn::kElementCount);
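+  // ForIn::kElementCount covers the five loop slots (the enumerable, the
+  // map or Smi(1) slow-check marker, the array of names, its length, and
+  // the running index); the matching DropOperands(5) is at the break label.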
+
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(a0, &convert);
   __ GetObjectType(a0, a1, a1);
-  __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
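+  // The LoadRoots execute in the branch delay slots, preparing the root for
+  // the next comparison at no extra instruction cost.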
+  __ Branch(USE_DELAY_SLOT, &done_convert, ge, a1,
+            Operand(FIRST_JS_RECEIVER_TYPE));
+  __ LoadRoot(at, Heap::kNullValueRootIndex);  // In delay slot.
+  __ Branch(USE_DELAY_SLOT, &exit, eq, a0, Operand(at));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);  // In delay slot.
+  __ Branch(&exit, eq, a0, Operand(at));
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -1062,16 +1019,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(a0);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1082,7 +1037,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(a0);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1117,16 +1072,18 @@
   // We got a fixed array in register v0. Iterate through that.
   __ bind(&fixed_array);
 
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(a1);
   __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  int vector_index = SmiFromSlot(slot)->value();
   __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
 
   __ li(a1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(a1, v0);  // Smi and array
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ Push(a1);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ li(a0, Operand(Smi::FromInt(0)));
-  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
+  __ Push(a0);  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1140,8 +1097,7 @@
   // Get the current entry of the array into register a3.
   __ lw(a2, MemOperand(sp, 2 * kPointerSize));
   __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
+  __ Lsa(t0, a2, a0, kPointerSizeLog2 - kSmiTagSize);
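+  // Lsa folds the previous sll/addu pair (base + scaled smi index) into a
+  // single macro op; on MIPS32R6 it is one LSA instruction.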
   __ lw(a3, MemOperand(t0));  // Current entry.
 
   // Get the expected map from the stack or a smi in the
@@ -1155,6 +1111,16 @@
   __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, t0, Operand(a2));
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we can
+  // no longer use the enum cache, i.e. we have left fast mode. So better
+  // record this information here, in case we later OSR back into this loop
+  // or reoptimize the whole function without rerunning the loop with the
+  // slow mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(a0);
+  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ sw(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1192,7 +1158,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ Drop(5);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1439,12 +1405,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ li(a1, Operand(var->name()));
-      __ Push(cp, a1);  // Context and name.
+      __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(v0);
@@ -1469,7 +1434,7 @@
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
     __ LoadRoot(a1, Heap::kNullValueRootIndex);
-    __ push(a1);
+    PushOperand(a1);
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1513,7 +1478,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(v0);  // Save result on stack.
+      PushOperand(v0);  // Save result on stack.
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1546,7 +1511,7 @@
         }
         // Duplicate receiver on stack.
         __ lw(a0, MemOperand(sp));
-        __ push(a0);
+        PushOperand(a0);
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1554,19 +1519,19 @@
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
           __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes.
-          __ push(a0);
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(a0);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         // Duplicate receiver on stack.
         __ lw(a0, MemOperand(sp));
-        __ push(a0);
+        PushOperand(a0);
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1589,13 +1554,13 @@
        it != accessor_table.end();
        ++it) {
     __ lw(a0, MemOperand(sp));  // Duplicate receiver.
-    __ push(a0);
+    PushOperand(a0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ li(a0, Operand(Smi::FromInt(NONE)));
-    __ push(a0);
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(a0);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1612,18 +1577,18 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(v0);  // Save result on the stack
+      PushOperand(v0);  // Save result on the stack
       result_saved = true;
     }
 
     __ lw(a0, MemOperand(sp));  // Duplicate receiver.
-    __ push(a0);
+    PushOperand(a0);
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1638,11 +1603,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ li(a0, Operand(Smi::FromInt(NONE)));
-            __ push(a0);
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1651,15 +1616,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ li(a0, Operand(Smi::FromInt(NONE)));
-          __ push(a0);
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ li(a0, Operand(Smi::FromInt(NONE)));
-          __ push(a0);
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1717,14 +1680,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(v0);  // array literal
+      PushOperand(v0);  // array literal
       result_saved = true;
     }
 
@@ -1747,21 +1710,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(v0);
+    PopOperand(v0);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(v0);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(v0);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1802,11 +1760,11 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = a1;
         __ lw(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY: {
@@ -1817,11 +1775,11 @@
           property->obj()->AsSuperPropertyReference()->home_object());
       __ Move(scratch, result_register());
       VisitForAccumulatorValue(property->key());
-      __ Push(scratch, result_register());
+      PushOperands(scratch, result_register());
       if (expr->is_compound()) {
         const Register scratch1 = t0;
         __ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
       }
       break;
     }
@@ -1869,7 +1827,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(v0);  // Left operand goes on the stack.
+    PushOperand(v0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1935,8 +1893,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ pop(a1);
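+      // a1 now holds the resume mode: NEXT and THROW branch to &resume
+      // below, while RETURN falls through, boxes the input value in an
+      // iterator result and returns from the generator.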
+      __ Branch(&resume, ne, a1,
+                Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1953,7 +1919,7 @@
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1962,127 +1928,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
-      __ sw(a1, FieldMemOperand(result_register(),
-                                JSGeneratorObject::kContinuationOffset));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-      __ Branch(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ mov(a0, v0);
-      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
-      __ lw(a3, MemOperand(sp, 1 * kPointerSize));           // iter
-      __ Push(load_name, a3, a0);                     // "throw", iter, except
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(a0);                                        // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(a0);                                       // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ mov(a0, v0);
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ lw(a0, MemOperand(sp, generator_object_depth));
-      __ push(a0);                                       // g
-      __ Push(Smi::FromInt(handler_index));              // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
-      __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
-      __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
-      __ mov(a1, cp);
-      __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
-                          kRAHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ pop(v0);                                      // result
-      EmitReturnSequence();
-      __ mov(a0, v0);
-      __ bind(&l_resume);                              // received in a0
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ bind(&l_next);
-
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
-      __ lw(a3, MemOperand(sp, 1 * kPointerSize));          // iter
-      __ Push(load_name, a3, a0);                      // "next", iter, received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ lw(load_receiver, MemOperand(sp, kPointerSize));
-      __ lw(load_name, MemOperand(sp, 2 * kPointerSize));
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(a0, v0);
-      __ mov(a1, a0);
-      __ sw(a1, MemOperand(sp, 2 * kPointerSize));
-      SetCallPosition(expr);
-      __ li(a0, Operand(1));
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ Move(load_receiver, v0);
-
-      __ push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // v0=result.done
-      __ mov(a0, v0);
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ LoadRoot(at, Heap::kTrueValueRootIndex);
-      __ Branch(&l_try, ne, result_register(), Operand(at));
-
-      // result.value
-      __ pop(load_receiver);                                 // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                         // v0=result.value
-      context()->DropAndPlug(2, v0);                         // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
@@ -2096,7 +1950,14 @@
   // a1 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(a1);
+  PopOperand(a1);
+
+  // Store input value into generator object.
+  __ sw(result_register(),
+        FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+  __ mov(a2, result_register());
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
+                      kRAHasBeenSaved, kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2149,6 +2010,7 @@
     __ Addu(a3, a3, Operand(a2));
     __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
     __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ Jump(a3);
     __ bind(&slow_resume);
   }
@@ -2162,6 +2024,7 @@
   __ push(a2);
   __ Branch(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   DCHECK(!result_register().is(a1));
   __ Push(a1, result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2173,6 +2036,36 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3) {
+  OperandStackDepthIncrement(3);
+  __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3, Register reg4) {
+  OperandStackDepthIncrement(4);
+  __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
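+// Debug-only sanity check: the statically tracked operand stack depth must
+// match the actual frame extent, i.e. fp - sp must equal the fixed frame
+// size plus operand_stack_depth_ * kPointerSize.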
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ Subu(v0, fp, sp);
+    __ Assert(eq, kUnexpectedStackDepth, v0, Operand(expected_diff));
+  }
+}
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2207,38 +2100,7 @@
   __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
   __ li(LoadDescriptor::SlotRegister(),
         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ li(LoadDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2254,7 +2116,7 @@
   // Get the arguments.
   Register left = a1;
   Register right = a0;
-  __ pop(left);
+  PopOperand(left);
   __ mov(a0, result_register());
 
   // Perform combined smi check on both operands.
@@ -2264,8 +2126,7 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
@@ -2334,27 +2195,17 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in v0.
-  DCHECK(lit != NULL);
-  __ push(v0);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = a1;
-  __ lw(scratch,
-        FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
-  __ push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
+    Register scratch = a1;
     if (property->is_static()) {
       __ lw(scratch, MemOperand(sp, kPointerSize));  // constructor
     } else {
       __ lw(scratch, MemOperand(sp, 0));  // prototype
     }
-    __ push(scratch);
+    PushOperand(scratch);
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
     // The static prototype property is read-only. We handle the non-computed
@@ -2377,37 +2228,32 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(a0);
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(a0);
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   __ mov(a0, result_register());
-  __ pop(a1);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(a1);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2430,10 +2276,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(result_register());  // Preserve value.
+      PushOperand(result_register());  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ mov(StoreDescriptor::ReceiverRegister(), result_register());
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ li(StoreDescriptor::NameRegister(),
             Operand(prop->key()->AsLiteral()->value()));
       EmitLoadStoreICSlot(slot);
@@ -2441,7 +2287,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(v0);
+      PushOperand(v0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2458,7 +2304,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(v0);
+      PushOperand(v0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2478,12 +2324,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(result_register());  // Preserve value.
+      PushOperand(result_register());  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(StoreDescriptor::NameRegister(), result_register());
-      __ Pop(StoreDescriptor::ValueRegister(),
-             StoreDescriptor::ReceiverRegister());
+      PopOperands(StoreDescriptor::ValueRegister(),
+                  StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2567,16 +2413,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(language_mode())));
-      __ Push(v0, cp, a1, a0);  // Value, context, name, language mode.
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(var->name());
+      __ Push(v0);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, a1);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ lw(a2, location);
         __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2622,7 +2469,7 @@
   __ mov(StoreDescriptor::ValueRegister(), result_register());
   __ li(StoreDescriptor::NameRegister(),
         Operand(prop->key()->AsLiteral()->value()));
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2639,10 +2486,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(v0);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(v0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2652,10 +2500,10 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(v0);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(v0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
@@ -2667,7 +2515,8 @@
   // - a1 is the key,
   // - a2 is the receiver.
   __ mov(StoreDescriptor::ValueRegister(), result_register());
-  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  PopOperands(StoreDescriptor::ReceiverRegister(),
+              StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
   Handle<Code> ic =
@@ -2702,7 +2551,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), v0);
-      __ pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2738,7 +2587,7 @@
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ push(at);
+    PushOperand(at);
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2749,7 +2598,7 @@
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
     __ lw(at, MemOperand(sp, 0));
-    __ push(at);
+    PushOperand(at);
     __ sw(v0, MemOperand(sp, kPointerSize));
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2773,9 +2622,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ mov(scratch, v0);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, v0, v0, scratch);
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperands(scratch, v0, v0, scratch);
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
@@ -2783,8 +2631,7 @@
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ sw(v0, MemOperand(sp, kPointerSize));
@@ -2813,7 +2660,7 @@
 
   // Push the target function under the receiver.
   __ lw(at, MemOperand(sp, 0));
-  __ push(at);
+  PushOperand(at);
   __ sw(v0, MemOperand(sp, kPointerSize));
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2833,9 +2680,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ Move(scratch, v0);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, v0, v0, scratch);
+  PushOperands(scratch, v0, v0, scratch);
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2843,8 +2689,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ sw(v0, MemOperand(sp, kPointerSize));
@@ -2867,12 +2712,23 @@
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   // Record source position of the IC call.
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2918,11 +2774,9 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in v0)
     // and the object holding it (returned in v1).
-    DCHECK(!context_register().is(a2));
-    __ li(a2, Operand(callee->name()));
-    __ Push(context_register(), a2);
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(v0, v1);  // Function, receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(v0, v1);  // Function, receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
@@ -2944,7 +2798,7 @@
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ push(a2);  // Reserved receiver slot.
+    PushOperand(a2);  // Reserved receiver slot.
   }
 }
 
@@ -2976,7 +2830,10 @@
   SetCallPosition(expr);
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3017,6 +2874,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3037,7 +2895,7 @@
         FieldMemOperand(result_register(), HeapObject::kMapOffset));
   __ lw(result_register(),
         FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  __ Push(result_register());
+  PushOperand(result_register());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -3059,6 +2917,7 @@
   __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -3112,81 +2971,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
-  __ Branch(if_false);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-  __ lw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-  __ lw(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-  __ li(t0, 0x80000000);
-  Label not_nan;
-  __ Branch(&not_nan, ne, a2, Operand(t0));
-  __ mov(t0, zero_reg);
-  __ mov(a2, a1);
-  __ bind(&not_nan);
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a2, Operand(t0), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3276,65 +3060,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in a1 and the formal
-  // parameter count in a0.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(a1, v0);
-  __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-  Label exit;
-  // Get the number of formal parameters.
-  __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
-  __ Branch(&exit, ne, a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3404,28 +3129,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3437,7 +3140,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value, at);
@@ -3474,7 +3177,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value, at);
@@ -3500,35 +3203,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(a1);  // v0 = value. a1 = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(a1, &done);
-
-  // If the object is not a value type, return the value.
-  __ GetObjectType(a1, a2, a2);
-  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
-
-  // Store the value.
-  __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(a2, v0);
-  __ RecordWriteField(
-      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3546,26 +3220,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into v0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  Label convert, done_convert;
-  __ JumpIfSmi(v0, &convert);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ GetObjectType(v0, a1, a1);
-  __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
-  __ bind(&convert);
-  __ Push(v0);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3597,7 +3251,7 @@
   Register index = a0;
   Register result = v0;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3645,7 +3299,7 @@
   Register scratch = a3;
   Register result = v0;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3695,6 +3349,7 @@
   // Call the target.
   __ li(a0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3748,240 +3403,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      empty_separator_loop, one_char_separator_loop,
-      one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(0));
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = v0;
-  Register elements = no_reg;  // Will be v0.
-  Register result = no_reg;  // Will be v0.
-  Register separator = a1;
-  Register array_length = a2;
-  Register result_pos = no_reg;  // Will be a2.
-  Register string_length = a3;
-  Register string = t0;
-  Register element = t1;
-  Register elements_end = t2;
-  Register scratch1 = t3;
-  Register scratch2 = t5;
-  Register scratch3 = t4;
-
-  // Separator operand is on the stack.
-  __ pop(separator);
-
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ GetObjectType(array, scratch1, scratch2);
-  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
-  __ LoadRoot(v0, Heap::kempty_stringRootIndex);
-  __ Branch(&done);
-
-  __ bind(&non_trivial_array);
-
-  // Get the FixedArray containing array's elements.
-  elements = array;
-  __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-  array = no_reg;  // End of array's live range.
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ mov(string_length, zero_reg);
-  __ Addu(element,
-          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(elements_end, array_length, kPointerSizeLog2);
-  __ Addu(elements_end, element, elements_end);
-  // Loop condition: while (element < elements_end).
-  // Live values in registers:
-  //   elements: Fixed array of strings.
-  //   array_length: Length of the fixed array of strings (not smi)
-  //   separator: Separator string
-  //   string_length: Accumulated sum of string lengths (smi).
-  //   element: Current array element.
-  //   elements_end: Array end.
-  if (generate_debug_code_) {
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
-              Operand(zero_reg));
-  }
-  __ bind(&loop);
-  __ lw(string, MemOperand(element));
-  __ Addu(element, element, kPointerSize);
-  __ JumpIfSmi(string, &bailout);
-  __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-  __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-  __ AddBranchOvf(string_length, string_length, Operand(scratch1), &bailout);
-  __ Branch(&loop, lt, element, Operand(elements_end));
-
-  // If array_length is 1, return elements[0], a string.
-  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
-  __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ Branch(&done);
-
-  __ bind(&not_size_one_array);
-
-  // Live values in registers:
-  //   separator: Separator string
-  //   array_length: Length of the array.
-  //   string_length: Sum of string lengths (smi).
-  //   elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ JumpIfSmi(separator, &bailout);
-  __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
-  // Add (separator length times array_length) - separator length to the
-  // string_length to get the length of the result string. array_length is not
-  // smi but the other values are, so the result is a smi.
-  __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ Subu(string_length, string_length, Operand(scratch1));
-  __ Mul(scratch3, scratch2, array_length, scratch1);
-  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
-  // zero.
-  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
-  __ And(scratch3, scratch2, Operand(0x80000000));
-  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
-  __ AddBranchOvf(string_length, string_length, Operand(scratch2), &bailout);
-  __ SmiUntag(string_length);
-
-  // Bailout for large object allocations.
-  __ Branch(&bailout, gt, string_length,
-            Operand(Page::kMaxRegularHeapObjectSize));
-
-  // Get first element in the array to free up the elements register to be used
-  // for the result.
-  __ Addu(element,
-          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  result = elements;  // End of live range for elements.
-  elements = no_reg;
-  // Live values in registers:
-  //   element: First array element
-  //   separator: Separator string
-  //   string_length: Length of result string (not smi)
-  //   array_length: Length of the array.
-  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
-                           elements_end, &bailout);
-  // Prepare for looping. Set up elements_end to end of the array. Set
-  // result_pos to the position of the result where to write the first
-  // character.
-  __ sll(elements_end, array_length, kPointerSizeLog2);
-  __ Addu(elements_end, element, elements_end);
-  result_pos = array_length;  // End of live range for array_length.
-  array_length = no_reg;
-  __ Addu(result_pos,
-          result,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // Check the length of the separator.
-  __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ li(at, Operand(Smi::FromInt(1)));
-  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
-  __ Branch(&long_separator, gt, scratch1, Operand(at));
-
-  // Empty separator case.
-  __ bind(&empty_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-
-  // Copy next array element to the result.
-  __ lw(string, MemOperand(element));
-  __ Addu(element, element, kPointerSize);
-  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  // One-character separator case.
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator.
-  __ jmp(&one_char_separator_loop_entry);
-
-  __ bind(&one_char_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Single separator one-byte char (in lower byte).
-
-  // Copy the separator character to the result.
-  __ sb(separator, MemOperand(result_pos));
-  __ Addu(result_pos, result_pos, 1);
-
-  // Copy next array element to the result.
-  __ bind(&one_char_separator_loop_entry);
-  __ lw(string, MemOperand(element));
-  __ Addu(element, element, kPointerSize);
-  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  // Long separator case (separator is more than one character). Entry is at the
-  // label long_separator below.
-  __ bind(&long_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Separator string.
-
-  // Copy the separator to the result.
-  __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Addu(string,
-          separator,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-
-  __ bind(&long_separator);
-  __ lw(string, MemOperand(element));
-  __ Addu(element, element, kPointerSize);
-  __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  __ bind(&bailout);
-  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ bind(&done);
-  context()->Plug(v0);
-}
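
The deleted fast path above split the join into three copy loops keyed on separator length (empty, one character, longer). A rough host-language sketch of the same dispatch, assuming std::string elements in place of SeqOneByteStrings:

    #include <string>
    #include <vector>

    std::string Join(const std::vector<std::string>& parts,
                     const std::string& sep) {
      std::string result;
      for (size_t i = 0; i < parts.size(); ++i) {
        if (i > 0) {
          if (sep.size() == 1) {
            result += sep[0];        // one-char separator: single byte store
          } else if (!sep.empty()) {
            result += sep;           // long separator: block copy
          }                          // empty separator: copy nothing
        }
        result += parts[i];          // CopyBytes of the next element
      }
      return result;
    }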
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -4014,7 +3435,7 @@
   __ jmp(&done);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(v0);
@@ -4024,7 +3445,7 @@
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as the receiver.
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ push(v0);
+  PushOperand(v0);
 
   __ LoadNativeContextSlot(expr->context_index(), v0);
 }
@@ -4039,6 +3460,7 @@
   __ li(a0, Operand(arg_count));
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
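
The OperandStackDepthDecrement(arg_count + 1) added here mirrors exactly what the Call builtin consumes: the receiver plus arg_count arguments. A sketch of the bookkeeping contract these helpers maintain (the class is illustrative; the real counter is a field on FullCodeGenerator):

    // Every push/pop of an operand must be mirrored so a debug check can
    // later compare fp - sp against the recorded depth.
    class OperandStackTracker {
     public:
      void Increment(int count) { depth_ += count; }
      void Decrement(int count) { depth_ -= count; }
      // A JS call consumes the receiver plus arg_count arguments:
      void AfterCall(int arg_count) { Decrement(arg_count + 1); }
      int depth() const { return depth_; }

     private:
      int depth_ = 0;
    };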
 
 
@@ -4052,7 +3474,7 @@
 
     // Push the target function under the receiver.
     __ lw(at, MemOperand(sp, 0));
-    __ push(at);
+    PushOperand(at);
     __ sw(v0, MemOperand(sp, kPointerSize));
 
     // Push the arguments ("left-to-right").
@@ -4088,6 +3510,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(v0);
       }
     }
@@ -4105,9 +3528,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(v0);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4128,9 +3551,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          DCHECK(!context_register().is(a2));
-          __ li(a2, Operand(var->name()));
-          __ Push(context_register(), a2);
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(v0);
         }
@@ -4175,6 +3596,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         __ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -4225,7 +3647,7 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ li(at, Operand(Smi::FromInt(0)));
-      __ push(at);
+      PushOperand(at);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4240,10 +3662,10 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = a1;
         __ lw(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4256,9 +3678,9 @@
         const Register scratch1 = t0;
         __ Move(scratch, result_register());
         VisitForAccumulatorValue(prop->key());
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         __ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4344,7 +3766,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(v0);
+          PushOperand(v0);
           break;
         case NAMED_PROPERTY:
           __ sw(v0, MemOperand(sp, kPointerSize));
@@ -4368,9 +3790,7 @@
 
   SetExpressionPosition(expr);
 
-
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
-                                              strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4404,7 +3824,7 @@
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       __ li(StoreDescriptor::NameRegister(),
             Operand(prop->key()->AsLiteral()->value()));
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4441,8 +3861,8 @@
     }
     case KEYED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
-      __ Pop(StoreDescriptor::ReceiverRegister(),
-             StoreDescriptor::NameRegister());
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::NameRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4497,8 +3917,8 @@
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
     Split(eq, v0, Operand(at), if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_false, eq, v0, Operand(at));
     __ JumpIfSmi(v0, if_false);
     // Check for undetectable objects => true.
     __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
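
The rewritten sequence tests null first because typeof null is "object" even though null's map carries the undetectable bit that makes typeof yield "undefined" for other objects. The decision it encodes, as a small sketch with the relevant predicates assumed precomputed:

    // Truth table for `typeof x == "undefined"` as emitted above.
    bool TypeofIsUndefined(bool is_null, bool is_smi, bool is_undetectable) {
      if (is_null) return false;   // typeof null is "object", despite the
                                   // undetectable bit on null's map
      if (is_smi) return false;    // numbers are never "undefined"
      return is_undetectable;      // undefined and undetectable objects
    }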
@@ -4564,7 +3984,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -4573,7 +3993,7 @@
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
       __ mov(a0, result_register());
-      __ pop(a1);
+      PopOperand(a1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4586,7 +4006,7 @@
       VisitForAccumulatorValue(expr->right());
       Condition cc = CompareIC::ComputeCondition(op);
       __ mov(a0, result_register());
-      __ pop(a1);
+      PopOperand(a1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4598,8 +4018,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4686,7 +4105,7 @@
     DCHECK(closure_scope->is_function_scope());
     __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
-  __ push(at);
+  PushOperand(at);
 }
 
 
@@ -4695,23 +4114,12 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   DCHECK(!result_register().is(a1));
-  // Store result register while executing finally block.
-  __ push(result_register());
-  // Cook return address in link register to stack (smi encoded Code* delta).
-  __ Subu(a1, ra, Operand(masm_->CodeObject()));
-  DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
-  STATIC_ASSERT(0 == kSmiTag);
-  __ Addu(a1, a1, Operand(a1));  // Convert to smi.
-
-  // Store result register while executing finally block.
-  __ push(a1);
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ li(at, Operand(pending_message_obj));
   __ lw(a1, MemOperand(at));
-  __ push(a1);
+  PushOperand(a1);
 
   ClearPendingMessage();
 }
@@ -4720,21 +4128,11 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(a1));
   // Restore pending message from stack.
-  __ pop(a1);
+  PopOperand(a1);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ li(at, Operand(pending_message_obj));
   __ sw(a1, MemOperand(at));
-
-  // Restore result register from stack.
-  __ pop(a1);
-
-  // Uncook return address and return.
-  __ pop(result_register());
-  DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
-  __ sra(a1, a1, 1);  // Un-smi-tag value.
-  __ Addu(at, a1, Operand(masm_->CodeObject()));
-  __ Jump(at);
 }
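
With return-address cooking gone, Enter/ExitFinallyBlock only save and restore the pending message around the finally body; control transfer is handled by the deferred-command tokens emitted below. A minimal model of the save/restore, with a stand-in Isolate:

    struct Isolate { const void* pending_message = nullptr; };

    template <typename F>
    void RunFinally(Isolate* isolate, F finally_body) {
      const void* saved = isolate->pending_message;  // EnterFinallyBlock: push
      isolate->pending_message = nullptr;            // ClearPendingMessage()
      finally_body();
      isolate->pending_message = saved;              // ExitFinallyBlock: pop
    }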
 
 
@@ -4754,6 +4152,32 @@
         Operand(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(a1));
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(a1);                 // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ li(at, Operand(Smi::FromInt(cmd.token)));
+    __ Branch(&skip, ne, a1, Operand(at));
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
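
EmitCommands compares the popped token against every deferred command and falls through on mismatch. The same dispatch in plain C++, with puts() standing in for the emitted code paths:

    #include <cstdio>
    #include <vector>

    enum Command { kReturn, kThrow, kContinue, kBreak };
    struct DeferredCommand { Command command; int token; };

    void Dispatch(const std::vector<DeferredCommand>& commands, int token) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != token) continue;  // __ Branch(&skip, ne, a1, token)
        switch (cmd.command) {
          case kReturn:   std::puts("unwind and return");        break;
          case kThrow:    std::puts("re-throw the accumulator"); break;
          case kContinue: std::puts("jump to loop header");      break;
          case kBreak:    std::puts("jump past loop");           break;
        }
      }
    }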
 
 #undef __
 
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index 44dd791..c85dee4 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -27,8 +27,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
@@ -86,6 +85,7 @@
   }
 
  private:
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
@@ -118,13 +118,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop-at");
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
     __ ld(a2, MemOperand(sp, receiver_offset));
@@ -145,6 +138,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -272,21 +266,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ li(RestParamAccessDescriptor::parameter_count(),
-          Operand(Smi::FromInt(num_parameters)));
-    __ Daddu(RestParamAccessDescriptor::parameter_pointer(), fp,
-             Operand(StandardFrameConstants::kCallerSPOffset + offset));
-    __ li(RestParamAccessDescriptor::rest_parameter_index(),
-          Operand(Smi::FromInt(rest_index)));
-    function_in_register_a1 = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register_a1) {
+      __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register_a1 = false;
     SetVar(rest_param, v0, a1, a2);
   }
 
@@ -294,28 +279,20 @@
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register_a1) {
       // Load this again, if it's used by the local context below.
       __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ li(ArgumentsAccessNewDescriptor::parameter_count(),
-          Operand(Smi::FromInt(num_parameters)));
-    __ Daddu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
-             Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(a1);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, v0, a1, a2);
   }
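
The new allocation path picks between two stubs and one generic runtime call. The decision tree, as a sketch (enum names are illustrative):

    enum ArgumentsPath { kFastStrictStub, kGenericRuntime, kFastSloppyStub };

    ArgumentsPath ChooseArgumentsPath(bool strict_or_complex_params,
                                      bool has_duplicate_parameters) {
      if (strict_or_complex_params) return kFastStrictStub;   // FastNewStrictArgumentsStub
      if (has_duplicate_parameters) return kGenericRuntime;   // kNewSloppyArguments_Generic
      return kFastSloppyStub;                                 // FastNewSloppyArgumentsStub
    }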
@@ -430,6 +407,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ Branch(&ok, ge, a3, Operand(zero_reg));
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(v0);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(v0);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
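
The weight computation above is the interrupt-budget heuristic shared with back edges: self-optimizing code spends its budget evenly, everything else scales with emitted code size, clamped to kMaxBackEdgeWeight. The arithmetic, lifted into a plain function with the constants passed in:

    #include <algorithm>

    int ReturnSequenceWeight(bool should_self_optimize, int interrupt_budget,
                             int self_opt_count, int pc_offset,
                             int code_size_multiplier,
                             int max_back_edge_weight) {
      if (should_self_optimize) return interrupt_budget / self_opt_count;
      return std::min(max_back_edge_weight,
                      std::max(1, pc_offset / code_size_multiplier));
    }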
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -443,24 +444,7 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ Branch(&ok, ge, a3, Operand(zero_reg));
-    __ push(v0);
-    __ Call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(v0);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     // Make sure that the constant pool is not emitted inside of the return
     // sequence.
@@ -482,7 +466,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -499,7 +483,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
   __ LoadRoot(result_register(), index);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -534,7 +518,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
   // Immediates cannot be pushed directly.
   __ li(result_register(), Operand(lit));
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -543,7 +527,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -568,41 +552,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ sd(reg, MemOperand(sp, 0));
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -626,6 +583,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
+  codegen()->OperandStackDepthIncrement(1);
   Label done;
   __ bind(materialize_true);
   __ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -657,7 +615,7 @@
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ LoadRoot(at, value_root_index);
-  __ push(at);
+  codegen()->PushOperand(at);
 }
 
 
@@ -782,7 +740,7 @@
   // The variable in the declaration always resides in the current function
   // context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ LoadRoot(a4, Heap::kWithContextMapRootIndex);
@@ -904,11 +862,11 @@
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
       __ li(a2, Operand(variable->name()));
-      __ Push(a2);
+      PushOperand(a2);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -983,8 +941,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1005,7 +963,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ Branch(nested_statement.break_label());
   } else {
@@ -1040,19 +998,20 @@
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&exit, eq, a0, Operand(at));
-  Register null_value = a5;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ Branch(&exit, eq, a0, Operand(null_value));
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-  __ mov(a0, v0);
-  // Convert the object to a JS object.
+  __ mov(a0, result_register());
+  OperandStackDepthIncrement(ForIn::kElementCount);
+
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(a0, &convert);
   __ GetObjectType(a0, a1, a1);
-  __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ Branch(USE_DELAY_SLOT, &done_convert, ge, a1,
+            Operand(FIRST_JS_RECEIVER_TYPE));
+  __ LoadRoot(at, Heap::kNullValueRootIndex);  // In delay slot.
+  __ Branch(USE_DELAY_SLOT, &exit, eq, a0, Operand(at));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);  // In delay slot.
+  __ Branch(&exit, eq, a0, Operand(at));
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -1061,16 +1020,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(a0);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1081,7 +1038,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(a0);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1116,16 +1073,18 @@
   // We got a fixed array in register v0. Iterate through that.
   __ bind(&fixed_array);
 
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(a1);
   __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  int vector_index = SmiFromSlot(slot)->value();
   __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
 
   __ li(a1, Operand(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ Push(a1, v0);  // Smi and array
   __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ Push(a1);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ li(a0, Operand(Smi::FromInt(0)));
-  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
+  __ Push(a0);  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1154,6 +1113,16 @@
   __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, a4, Operand(a2));
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and we only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. So we
+  // record this information here, in case we later OSR back into this loop
+  // or reoptimize the whole function without rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(a0);
+  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ sd(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1191,7 +1160,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ Drop(5);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
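
The feedback-vector writes above deliberately clobber the slot with the megamorphic sentinel, so later compiles cannot re-trust the enum cache once the loop has left fast mode. Conceptually:

    #include <vector>

    struct FeedbackVector {
      std::vector<const void*> slots;
      // Writing the sentinel makes subsequent optimizing compiles treat the
      // site as megamorphic instead of assuming the enum cache again.
      void MarkMegamorphic(int slot_index, const void* megamorphic_sentinel) {
        slots[slot_index] = megamorphic_sentinel;
      }
    };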
@@ -1438,12 +1407,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ li(a1, Operand(var->name()));
-      __ Push(cp, a1);  // Context and name.
+      __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(v0);
@@ -1468,7 +1436,7 @@
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
     __ LoadRoot(a1, Heap::kNullValueRootIndex);
-    __ push(a1);
+    PushOperand(a1);
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1512,7 +1480,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(v0);  // Save result on stack.
+      PushOperand(v0);  // Save result on stack.
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1545,7 +1513,7 @@
         }
         // Duplicate receiver on stack.
         __ ld(a0, MemOperand(sp));
-        __ push(a0);
+        PushOperand(a0);
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1553,19 +1521,19 @@
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
           __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes.
-          __ push(a0);
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(a0);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         // Duplicate receiver on stack.
         __ ld(a0, MemOperand(sp));
-        __ push(a0);
+        PushOperand(a0);
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1588,13 +1556,13 @@
        it != accessor_table.end();
        ++it) {
     __ ld(a0, MemOperand(sp));  // Duplicate receiver.
-    __ push(a0);
+    PushOperand(a0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ li(a0, Operand(Smi::FromInt(NONE)));
-    __ push(a0);
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(a0);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1611,18 +1579,18 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(v0);  // Save result on the stack
+      PushOperand(v0);  // Save result on the stack
       result_saved = true;
     }
 
     __ ld(a0, MemOperand(sp));  // Duplicate receiver.
-    __ push(a0);
+    PushOperand(a0);
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1637,11 +1605,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ li(a0, Operand(Smi::FromInt(NONE)));
-            __ push(a0);
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1650,15 +1618,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ li(a0, Operand(Smi::FromInt(NONE)));
-          __ push(a0);
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ li(a0, Operand(Smi::FromInt(NONE)));
-          __ push(a0);
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1716,14 +1682,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(v0);  // array literal
+      PushOperand(v0);  // array literal
       result_saved = true;
     }
 
@@ -1746,21 +1712,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(v0);
+    PopOperand(v0);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(v0);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(v0);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1801,11 +1762,11 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = a1;
         __ ld(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY: {
@@ -1816,11 +1777,11 @@
           property->obj()->AsSuperPropertyReference()->home_object());
       __ Move(scratch, result_register());
       VisitForAccumulatorValue(property->key());
-      __ Push(scratch, result_register());
+      PushOperands(scratch, result_register());
       if (expr->is_compound()) {
         const Register scratch1 = a4;
         __ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
       }
       break;
     }
@@ -1868,7 +1829,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(v0);  // Left operand goes on the stack.
+    PushOperand(v0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1934,8 +1895,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ pop(a1);
+      __ Branch(&resume, ne, a1,
+                Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1952,7 +1921,7 @@
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1961,125 +1930,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
-      __ sd(a1, FieldMemOperand(result_register(),
-                                JSGeneratorObject::kContinuationOffset));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-      // Initial send value is undefined.
-      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-      __ Branch(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ mov(a0, v0);
-      __ LoadRoot(a2, Heap::kthrow_stringRootIndex);  // "throw"
-      __ ld(a3, MemOperand(sp, 1 * kPointerSize));    // iter
-      __ Push(a2, a3, a0);                            // "throw", iter, except
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(a0);                                        // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(a0);                                       // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ mov(a0, v0);
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ ld(a0, MemOperand(sp, generator_object_depth));
-      __ push(a0);                                       // g
-      __ Push(Smi::FromInt(handler_index));              // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
-      __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
-      __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
-      __ mov(a1, cp);
-      __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
-                          kRAHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ pop(v0);                                      // result
-      EmitReturnSequence();
-      __ mov(a0, v0);
-      __ bind(&l_resume);                              // received in a0
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ bind(&l_next);
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
-      __ ld(a3, MemOperand(sp, 1 * kPointerSize));          // iter
-      __ Push(load_name, a3, a0);                      // "next", iter, received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ ld(load_receiver, MemOperand(sp, kPointerSize));
-      __ ld(load_name, MemOperand(sp, 2 * kPointerSize));
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(a0, v0);
-      __ mov(a1, a0);
-      __ sd(a1, MemOperand(sp, 2 * kPointerSize));
-      SetCallPosition(expr);
-      __ li(a0, Operand(1));
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ Move(load_receiver, v0);
-
-      __ push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // v0=result.done
-      __ mov(a0, v0);
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ LoadRoot(at, Heap::kTrueValueRootIndex);
-      __ Branch(&l_try, ne, result_register(), Operand(at));
-
-      // result.value
-      __ pop(load_receiver);                                 // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ li(LoadDescriptor::SlotRegister(),
-            Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                         // v0=result.value
-      context()->DropAndPlug(2, v0);                         // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
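
The new continuation protocol: whoever resumes the generator pushes the resume mode, and the continuation pops it, either falling through to &resume or boxing the input as a done iterator result for a return resume. A sketch with stand-in enum values:

    enum ResumeMode { kNext, kReturn, kThrow };  // JSGeneratorObject constants

    struct IterResult { bool done; int value; };

    IterResult Continuation(ResumeMode mode, int input_value) {
      if (mode == kReturn) return {true, input_value};  // EmitCreateIteratorResult(true)
      return {false, input_value};                      // fall through to &resume
    }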
 
@@ -2093,7 +1952,14 @@
   // a1 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(a1);
+  PopOperand(a1);
+
+  // Store input value into generator object.
+  __ sd(result_register(),
+        FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+  __ mov(a2, result_register());
+  __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
+                      kRAHasBeenSaved, kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2148,6 +2014,7 @@
     __ Daddu(a3, a3, Operand(a2));
     __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
     __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ Jump(a3);
     __ bind(&slow_resume);
   }
@@ -2161,6 +2028,7 @@
   __ push(a2);
   __ Branch(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   DCHECK(!result_register().is(a1));
   __ Push(a1, result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2172,6 +2040,36 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3) {
+  OperandStackDepthIncrement(3);
+  __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3, Register reg4) {
+  OperandStackDepthIncrement(4);
+  __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ Dsubu(v0, fp, sp);
+    __ Assert(eq, kUnexpectedStackDepth, v0, Operand(expected_diff));
+  }
+}
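
The assertion compares the actual frame extent against the tracked depth: fp - sp must equal the fixed frame size plus operand_stack_depth_ pointers. In plain arithmetic:

    #include <cstdint>

    bool StackDepthOk(uintptr_t fp, uintptr_t sp, int operand_stack_depth,
                      int fixed_frame_size_from_fp, int pointer_size) {
      uintptr_t expected = static_cast<uintptr_t>(
          fixed_frame_size_from_fp + operand_stack_depth * pointer_size);
      return fp - sp == expected;
    }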
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2206,40 +2104,7 @@
   __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
   __ li(LoadDescriptor::SlotRegister(),
         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  // Call keyed load IC. It has register arguments receiver and key.
-  SetExpressionPosition(prop);
-
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ li(LoadDescriptor::SlotRegister(),
-        Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2255,7 +2120,7 @@
   // Get the arguments.
   Register left = a1;
   Register right = a0;
-  __ pop(left);
+  PopOperand(left);
   __ mov(a0, result_register());
 
   // Perform combined smi check on both operands.
@@ -2265,8 +2130,7 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
@@ -2336,27 +2200,17 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in v0.
-  DCHECK(lit != NULL);
-  __ push(v0);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = a1;
-  __ ld(scratch,
-        FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
-  __ push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
+    Register scratch = a1;
     if (property->is_static()) {
       __ ld(scratch, MemOperand(sp, kPointerSize));  // constructor
     } else {
       __ ld(scratch, MemOperand(sp, 0));  // prototype
     }
-    __ push(scratch);
+    PushOperand(scratch);
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
     // The static prototype property is read only. We handle the non computed
@@ -2379,37 +2233,32 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(a0);
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(a0);
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   __ mov(a0, result_register());
-  __ pop(a1);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(a1);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2432,10 +2281,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(result_register());  // Preserve value.
+      PushOperand(result_register());  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ mov(StoreDescriptor::ReceiverRegister(), result_register());
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ li(StoreDescriptor::NameRegister(),
             Operand(prop->key()->AsLiteral()->value()));
       EmitLoadStoreICSlot(slot);
@@ -2443,7 +2292,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(v0);
+      PushOperand(v0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2460,7 +2309,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(v0);
+      PushOperand(v0);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2480,12 +2329,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(result_register());  // Preserve value.
+      PushOperand(result_register());  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), result_register());
-      __ Pop(StoreDescriptor::ValueRegister(),
-             StoreDescriptor::ReceiverRegister());
+      PopOperands(StoreDescriptor::ValueRegister(),
+                  StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2568,21 +2417,17 @@
   } else if (!var->is_const_mode() ||
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ li(a4, Operand(var->name()));
-      __ li(a3, Operand(Smi::FromInt(language_mode())));
-      // jssp[0]  : language mode.
-      // jssp[8]  : name.
-      // jssp[16] : context.
-      // jssp[24] : value.
-      __ Push(v0, cp, a4, a3);
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(var->name());
+      __ Push(v0);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, a1);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ ld(a2, location);
         __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2628,7 +2473,7 @@
   __ mov(StoreDescriptor::ValueRegister(), result_register());
   __ li(StoreDescriptor::NameRegister(),
         Operand(prop->key()->AsLiteral()->value()));
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2645,10 +2490,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(v0);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(v0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2658,10 +2504,10 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(v0);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(v0);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
@@ -2673,7 +2519,8 @@
   // - a1 is the key,
   // - a2 is the receiver.
   __ mov(StoreDescriptor::ValueRegister(), result_register());
-  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  PopOperands(StoreDescriptor::ReceiverRegister(),
+              StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
   Handle<Code> ic =
@@ -2708,7 +2555,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), v0);
-      __ pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2744,7 +2591,7 @@
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ push(at);
+    PushOperand(at);
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2755,7 +2602,7 @@
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
     __ ld(at, MemOperand(sp, 0));
-    __ push(at);
+    PushOperand(at);
     __ sd(v0, MemOperand(sp, kPointerSize));
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2779,9 +2626,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ mov(scratch, v0);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, v0, v0, scratch);
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperands(scratch, v0, v0, scratch);
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
@@ -2789,8 +2635,7 @@
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ sd(v0, MemOperand(sp, kPointerSize));
@@ -2819,7 +2664,7 @@
 
   // Push the target function under the receiver.
   __ ld(at, MemOperand(sp, 0));
-  __ push(at);
+  PushOperand(at);
   __ sd(v0, MemOperand(sp, kPointerSize));
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2839,9 +2684,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ Move(scratch, v0);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, v0, v0, scratch);
+  PushOperands(scratch, v0, v0, scratch);
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2849,8 +2693,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ sd(v0, MemOperand(sp, kPointerSize));
@@ -2873,12 +2716,24 @@
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   // Record source position of the IC call.
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
+
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
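
For an allowed tail call the profiling counter is settled before the call, since control never returns to run the normal return sequence. A control-flow sketch, with puts() marking the emitted sequences:

    #include <cstdio>

    void EmitCallSequence(bool allow_tail_call, bool flag_trace) {
      if (allow_tail_call) {
        if (flag_trace) std::puts("call Runtime::kTraceTailCall");
        // Settle the counter now; there is no result register to preserve
        // because a tail call never returns here.
        std::puts("EmitProfilingCounterHandlingForReturnSequence(true)");
      }
      std::puts("CallIC(arg_count, mode, tail_call_mode)");
    }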
@@ -2923,11 +2778,9 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in v0)
     // and the object holding it (returned in v1).
-    DCHECK(!context_register().is(a2));
-    __ li(a2, Operand(callee->name()));
-    __ Push(context_register(), a2);
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(v0, v1);  // Function, receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(v0, v1);  // Function, receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the
@@ -2949,7 +2802,7 @@
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ push(a2);  // Reserved receiver slot.
+    PushOperand(a2);  // Reserved receiver slot.
   }
 }
 
@@ -2981,7 +2834,10 @@
   SetCallPosition(expr);
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3022,6 +2878,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3042,7 +2899,7 @@
         FieldMemOperand(result_register(), HeapObject::kMapOffset));
   __ ld(result_register(),
         FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  __ Push(result_register());
+  PushOperand(result_register());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -3064,6 +2921,7 @@
   __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -3117,81 +2975,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
-  __ Branch(if_false);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-  __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-  __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-  __ li(a4, 0x80000000);
-  Label not_nan;
-  __ Branch(&not_nan, ne, a2, Operand(a4));
-  __ mov(a4, zero_reg);
-  __ mov(a2, a1);
-  __ bind(&not_nan);
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a2, Operand(a4), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3281,65 +3064,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in a1 and the formal
-  // parameter count in a0.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(a1, v0);
-  __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-  Label exit;
-  // Get the number of formal parameters.
-  __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
-  __ Branch(&exit, ne, a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3409,28 +3133,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3442,7 +3144,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value, at);
@@ -3479,7 +3181,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ SmiTst(value, at);
@@ -3506,35 +3208,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(a1);  // v0 = value. a1 = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(a1, &done);
-
-  // If the object is not a value type, return the value.
-  __ GetObjectType(a1, a2, a2);
-  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
-
-  // Store the value.
-  __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset));
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(a2, v0);
-  __ RecordWriteField(
-      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3552,26 +3225,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into v0 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  Label convert, done_convert;
-  __ JumpIfSmi(v0, &convert);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ GetObjectType(v0, a1, a1);
-  __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
-  __ bind(&convert);
-  __ Push(v0);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3603,7 +3256,7 @@
   Register index = a0;
   Register result = v0;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3651,7 +3304,7 @@
   Register scratch = a3;
   Register result = v0;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3701,6 +3354,7 @@
   // Call the target.
   __ li(a0, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3754,242 +3408,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      empty_separator_loop, one_char_separator_loop,
-      one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(0));
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = v0;
-  Register elements = no_reg;  // Will be v0.
-  Register result = no_reg;  // Will be v0.
-  Register separator = a1;
-  Register array_length = a2;
-  Register result_pos = no_reg;  // Will be a2.
-  Register string_length = a3;
-  Register string = a4;
-  Register element = a5;
-  Register elements_end = a6;
-  Register scratch1 = a7;
-  Register scratch2 = t1;
-  Register scratch3 = t0;
-
-  // Separator operand is on the stack.
-  __ pop(separator);
-
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ GetObjectType(array, scratch1, scratch2);
-  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
-  __ LoadRoot(v0, Heap::kempty_stringRootIndex);
-  __ Branch(&done);
-
-  __ bind(&non_trivial_array);
-
-  // Get the FixedArray containing array's elements.
-  elements = array;
-  __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-  array = no_reg;  // End of array's live range.
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ mov(string_length, zero_reg);
-  __ Daddu(element,
-          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ dsll(elements_end, array_length, kPointerSizeLog2);
-  __ Daddu(elements_end, element, elements_end);
-  // Loop condition: while (element < elements_end).
-  // Live values in registers:
-  //   elements: Fixed array of strings.
-  //   array_length: Length of the fixed array of strings (not smi)
-  //   separator: Separator string
-  //   string_length: Accumulated sum of string lengths (smi).
-  //   element: Current array element.
-  //   elements_end: Array end.
-  if (generate_debug_code_) {
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
-              Operand(zero_reg));
-  }
-  __ bind(&loop);
-  __ ld(string, MemOperand(element));
-  __ Daddu(element, element, kPointerSize);
-  __ JumpIfSmi(string, &bailout);
-  __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-  __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-  __ DadduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
-  __ BranchOnOverflow(&bailout, scratch3);
-  __ Branch(&loop, lt, element, Operand(elements_end));
-
-  // If array_length is 1, return elements[0], a string.
-  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
-  __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ Branch(&done);
-
-  __ bind(&not_size_one_array);
-
-  // Live values in registers:
-  //   separator: Separator string
-  //   array_length: Length of the array.
-  //   string_length: Sum of string lengths (smi).
-  //   elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ JumpIfSmi(separator, &bailout);
-  __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
-  // Add (separator length times array_length) - separator length to the
-  // string_length to get the length of the result string. array_length is not
-  // smi but the other values are, so the result is a smi.
-  __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ Dsubu(string_length, string_length, Operand(scratch1));
-  __ SmiUntag(scratch1);
-  __ Dmul(scratch2, array_length, scratch1);
-  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
-  // zero.
-  __ dsra32(scratch1, scratch2, 0);
-  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
-  __ SmiUntag(string_length);
-  __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
-  __ BranchOnOverflow(&bailout, scratch3);
-
-  // Bailout for large object allocations.
-  __ Branch(&bailout, gt, string_length,
-            Operand(Page::kMaxRegularHeapObjectSize));
-
-  // Get first element in the array to free up the elements register to be used
-  // for the result.
-  __ Daddu(element,
-          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  result = elements;  // End of live range for elements.
-  elements = no_reg;
-  // Live values in registers:
-  //   element: First array element
-  //   separator: Separator string
-  //   string_length: Length of result string (not smi)
-  //   array_length: Length of the array.
-  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
-                           elements_end, &bailout);
-  // Prepare for looping. Set up elements_end to end of the array. Set
-  // result_pos to the position of the result where to write the first
-  // character.
-  __ dsll(elements_end, array_length, kPointerSizeLog2);
-  __ Daddu(elements_end, element, elements_end);
-  result_pos = array_length;  // End of live range for array_length.
-  array_length = no_reg;
-  __ Daddu(result_pos,
-          result,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // Check the length of the separator.
-  __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ li(at, Operand(Smi::FromInt(1)));
-  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
-  __ Branch(&long_separator, gt, scratch1, Operand(at));
-
-  // Empty separator case.
-  __ bind(&empty_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-
-  // Copy next array element to the result.
-  __ ld(string, MemOperand(element));
-  __ Daddu(element, element, kPointerSize);
-  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  // One-character separator case.
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator.
-  __ jmp(&one_char_separator_loop_entry);
-
-  __ bind(&one_char_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Single separator one-byte char (in lower byte).
-
-  // Copy the separator character to the result.
-  __ sb(separator, MemOperand(result_pos));
-  __ Daddu(result_pos, result_pos, 1);
-
-  // Copy next array element to the result.
-  __ bind(&one_char_separator_loop_entry);
-  __ ld(string, MemOperand(element));
-  __ Daddu(element, element, kPointerSize);
-  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  // Long separator case (separator is more than one character). Entry is at the
-  // label long_separator below.
-  __ bind(&long_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Separator string.
-
-  // Copy the separator to the result.
-  __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Daddu(string,
-          separator,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-
-  __ bind(&long_separator);
-  __ ld(string, MemOperand(element));
-  __ Daddu(element, element, kPointerSize);
-  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  // End while (element < elements_end).
-  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
-  DCHECK(result.is(v0));
-  __ Branch(&done);
-
-  __ bind(&bailout);
-  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ bind(&done);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -4022,7 +3440,7 @@
   __ jmp(&done);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(v0);
@@ -4032,7 +3450,7 @@
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as the receiver.
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ push(v0);
+  PushOperand(v0);
 
   __ LoadNativeContextSlot(expr->context_index(), v0);
 }
@@ -4047,6 +3465,7 @@
   __ li(a0, Operand(arg_count));
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -4060,7 +3479,7 @@
 
     // Push the target function under the receiver.
     __ ld(at, MemOperand(sp, 0));
-    __ push(at);
+    PushOperand(at);
     __ sd(v0, MemOperand(sp, kPointerSize));
 
     // Push the arguments ("left-to-right").
@@ -4095,6 +3514,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(v0);
       }
     }
@@ -4112,9 +3532,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(v0);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4136,8 +3556,7 @@
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
           DCHECK(!context_register().is(a2));
-          __ li(a2, Operand(var->name()));
-          __ Push(context_register(), a2);
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(v0);
         }
@@ -4182,6 +3601,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         __ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -4232,7 +3652,7 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ li(at, Operand(Smi::FromInt(0)));
-      __ push(at);
+      PushOperand(at);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4247,10 +3667,10 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = a1;
         __ ld(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4263,9 +3683,9 @@
         const Register scratch1 = a4;
         __ Move(scratch, result_register());
         VisitForAccumulatorValue(prop->key());
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         __ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4353,7 +3773,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(v0);
+          PushOperand(v0);
           break;
         case NAMED_PROPERTY:
           __ sd(v0, MemOperand(sp, kPointerSize));
@@ -4377,9 +3797,7 @@
 
   SetExpressionPosition(expr);
 
-
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
-                                              strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4413,7 +3831,7 @@
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       __ li(StoreDescriptor::NameRegister(),
             Operand(prop->key()->AsLiteral()->value()));
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4450,8 +3868,8 @@
     }
     case KEYED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
-      __ Pop(StoreDescriptor::ReceiverRegister(),
-             StoreDescriptor::NameRegister());
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::NameRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4506,8 +3924,8 @@
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
     Split(eq, v0, Operand(at), if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_false, eq, v0, Operand(at));
     __ JumpIfSmi(v0, if_false);
     // Check for undetectable objects => true.
     __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
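
This hunk is easy to misread: for a typeof x == "undefined" comparison, the
fast path now loads the *null* root and branches to if_false on a match,
before falling through to the undetectable-map test. The apparent reason
(consistent with the relaxed DCHECK in the PPC file below, which now admits
undetectable null/undefined literals) is that null, like undefined, carries
the undetectable map bit in this V8 version, so without the explicit
exclusion typeof null would wrongly take the "undefined" path. Sketch:

    // typeof x == "undefined", emitted shape (assumes the undetectable
    // bit is set on both the null and undefined oddballs):
    //   if (x == null_root) goto if_false;  // typeof null is "object"
    //   if (IsSmi(x))       goto if_false;
    //   branch on x->map() undetectable     // catches undefined and
    //                                       // undetectable JS objects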
@@ -4573,7 +3991,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(a4, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -4582,7 +4000,7 @@
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
       __ mov(a0, result_register());
-      __ pop(a1);
+      PopOperand(a1);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4595,7 +4013,7 @@
       VisitForAccumulatorValue(expr->right());
       Condition cc = CompareIC::ComputeCondition(op);
       __ mov(a0, result_register());
-      __ pop(a1);
+      PopOperand(a1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4607,8 +4025,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4697,7 +4114,7 @@
     DCHECK(closure_scope->is_function_scope());
     __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
-  __ push(at);
+  PushOperand(at);
 }
 
 
@@ -4706,21 +4123,12 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   DCHECK(!result_register().is(a1));
-  // Store result register while executing finally block.
-  __ push(result_register());
-  // Cook return address in link register to stack (smi encoded Code* delta).
-  __ Dsubu(a1, ra, Operand(masm_->CodeObject()));
-  __ SmiTag(a1);
-
-  // Store result register while executing finally block.
-  __ push(a1);
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ li(at, Operand(pending_message_obj));
   __ ld(a1, MemOperand(at));
-  __ push(a1);
+  PushOperand(a1);
 
   ClearPendingMessage();
 }
@@ -4729,21 +4137,11 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(a1));
   // Restore pending message from stack.
-  __ pop(a1);
+  PopOperand(a1);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ li(at, Operand(pending_message_obj));
   __ sd(a1, MemOperand(at));
-
-  // Restore result register from stack.
-  __ pop(a1);
-
-  // Uncook return address and return.
-  __ pop(result_register());
-
-  __ SmiUntag(a1);
-  __ Daddu(at, a1, Operand(masm_->CodeObject()));
-  __ Jump(at);
 }
 
 
@@ -4763,6 +4161,31 @@
         Operand(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(a1);                 // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ li(at, Operand(Smi::FromInt(cmd.token)));
+    __ Branch(&skip, ne, a1, Operand(at));
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
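
EmitCommands is the dispatch half of the new try/finally protocol: before
jumping into a finally block, the shared full-codegen code pushes the
accumulator and a Smi token identifying how control is leaving (return,
throw, break or continue); after the finally body runs, the code above pops
both and replays the command whose token matches. The recording side lives
in the shared full-codegen code, not in this diff; a hypothetical sketch,
with invented names, of what it must do:

    // Illustrative only: one token per distinct way of leaving the block.
    int FullCodeGenerator::DeferredCommands::RecordCommand(Command command,
                                                           Label* target) {
      int token = static_cast<int>(commands_.size());
      commands_.push_back({command, token, target});
      return token;  // the caller pushes Smi::FromInt(token) before jumping
    }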
 
 #undef __
 
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index d9c324c..24a2a38 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -19,7 +19,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
 
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
@@ -74,6 +74,7 @@
   }
 
  private:
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
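
Redefining __ as ACCESS_MASM(masm()) makes the macro expand through a
function call instead of a member variable (ACCESS_MASM(m) conventionally
expands to m->), so any class in this file that provides a masm() accessor
can use the shorthand. That is why JumpPatchSite gains the one-line private
accessor above, and it is what lets helpers defined on other classes share
the macro. Roughly:

    // Inside any method that can resolve masm():
    //   __ push(r3);   expands to   masm()->push(r3);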
@@ -107,13 +108,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop-at");
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
     __ LoadP(r5, MemOperand(sp, receiver_offset), r0);
@@ -142,6 +136,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -269,21 +264,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ LoadSmiLiteral(RestParamAccessDescriptor::parameter_count(),
-                      Smi::FromInt(num_parameters));
-    __ addi(RestParamAccessDescriptor::parameter_pointer(), fp,
-            Operand(StandardFrameConstants::kCallerSPOffset + offset));
-    __ LoadSmiLiteral(RestParamAccessDescriptor::rest_parameter_index(),
-                      Smi::FromInt(rest_index));
-    function_in_register_r4 = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register_r4) {
+      __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register_r4 = false;
     SetVar(rest_param, r3, r4, r5);
   }
 
@@ -291,28 +277,20 @@
   if (arguments != NULL) {
     // Function uses arguments object.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register_r4) {
       // Load this again, if it's used by the local context below.
       __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ LoadSmiLiteral(ArgumentsAccessNewDescriptor::parameter_count(),
-                      Smi::FromInt(num_parameters));
-    __ addi(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
-            Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(r4);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, r3, r4, r5);
   }
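
The ArgumentsAccessStub machinery is gone; the allocation path is now
chosen from the parameter shape, with all three paths taking the closure in
r4 (reloaded above when clobbered) and leaving the arguments object in r3
for the SetVar that follows. Schematically:

    //   strict mode or non-simple parameters -> FastNewStrictArgumentsStub
    //   duplicate parameters      -> Runtime::kNewSloppyArguments_Generic
    //   otherwise                 -> FastNewSloppyArgumentsStub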
@@ -426,6 +404,31 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ cmpi(r6, Operand::Zero());
+  __ bge(&ok);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(r3);
+  }
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(r3);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -439,23 +442,7 @@
       __ push(r3);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
-      weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ cmpi(r6, Operand::Zero());
-    __ bge(&ok);
-    __ push(r3);
-    __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-    __ pop(r3);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     // Make sure that the constant pool is not emitted inside of the return
     // sequence.
@@ -474,7 +461,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -490,7 +477,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
   __ LoadRoot(result_register(), index);
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
@@ -522,14 +509,14 @@
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
   // Immediates cannot be pushed directly.
   __ mov(result_register(), Operand(lit));
-  __ push(result_register());
+  codegen()->PushOperand(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -554,40 +541,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count, Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ StoreP(reg, MemOperand(sp, 0));
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -616,7 +577,7 @@
   __ bind(materialize_false);
   __ LoadRoot(ip, Heap::kFalseValueRootIndex);
   __ bind(&done);
-  __ push(ip);
+  codegen()->PushOperand(ip);
 }
 
 
@@ -638,7 +599,7 @@
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ LoadRoot(ip, value_root_index);
-  __ push(ip);
+  codegen()->PushOperand(ip);
 }
 
 
@@ -750,7 +711,7 @@
   // The variable in the declaration always resides in the current function
   // context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
     __ CompareRoot(r4, Heap::kWithContextMapRootIndex);
@@ -865,11 +826,11 @@
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
       __ mov(r5, Operand(variable->name()));
-      __ Push(r5);
+      PushOperand(r5);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -943,8 +904,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -967,7 +928,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ b(nested_statement.break_label());
   } else {
@@ -998,25 +959,21 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r3, ip);
-  __ beq(&exit);
-  Register null_value = r7;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ cmp(r3, null_value);
-  __ beq(&exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(r3, &convert);
   __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
   __ bge(&done_convert);
+  __ CompareRoot(r3, Heap::kNullValueRootIndex);
+  __ beq(&exit);
+  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ beq(&exit);
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -1024,16 +981,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(r3);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
-  __ beq(&call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take
+  // the slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
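
Note the reordering in the for-in prologue above: the JSReceiver check comes
first, so objects that are already receivers skip the null/undefined
comparisons entirely, and only non-receivers are tested against the two
roots before falling into ToObject. In outline:

    //   if (IsSmi(obj))        goto convert;       // numbers get wrapped
    //   if (obj is JSReceiver) goto done_convert;  // common case, no checks
    //   if (obj == null)       goto exit;          // skip the whole loop
    //   if (obj == undefined)  goto exit;
    // convert:      obj = ToObject(obj);
    // done_convert: push obj;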
@@ -1044,7 +999,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(r3);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1083,16 +1038,18 @@
   // We got a fixed array in register r3. Iterate through that.
   __ bind(&fixed_array);
 
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(r4);
   __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-  int vector_index = SmiFromSlot(slot)->value();
   __ StoreP(
       r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
   __ LoadSmiLiteral(r4, Smi::FromInt(1));  // Smi(1) indicates slow check
   __ Push(r4, r3);  // Smi and array
   __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  __ Push(r4);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ LoadSmiLiteral(r3, Smi::FromInt(0));
-  __ Push(r4, r3);  // Fixed array length (as smi) and initial index.
+  __ Push(r3);  // Initial index.
 
   // Generate code for doing the condition check.
   __ bind(&loop);
@@ -1122,6 +1079,17 @@
   __ cmp(r7, r5);
   __ beq(&update_each);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. Better
+  // record this information here, in case we later OSR back into this loop
+  // or reoptimize the whole function without rerunning the loop with the
+  // slow mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(r3);
+  __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ StoreP(
+      r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1161,7 +1129,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ Drop(5);
+  DropOperands(5);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1402,12 +1370,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ mov(r4, Operand(var->name()));
-      __ Push(cp, r4);  // Context and name.
+      __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(r3);
@@ -1432,7 +1399,7 @@
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
     __ LoadRoot(r4, Heap::kNullValueRootIndex);
-    __ push(r4);
+    PushOperand(r4);
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1477,7 +1444,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(r3);  // Save result on stack
+      PushOperand(r3);  // Save result on stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1509,7 +1476,7 @@
         }
         // Duplicate receiver on stack.
         __ LoadP(r3, MemOperand(sp));
-        __ push(r3);
+        PushOperand(r3);
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1517,19 +1484,19 @@
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
           __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));  // PropertyAttributes
-          __ push(r3);
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(r3);
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         // Duplicate receiver on stack.
         __ LoadP(r3, MemOperand(sp));
-        __ push(r3);
+        PushOperand(r3);
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1551,13 +1518,13 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end(); ++it) {
     __ LoadP(r3, MemOperand(sp));  // Duplicate receiver.
-    __ push(r3);
+    PushOperand(r3);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
-    __ push(r3);
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(r3);
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1574,18 +1541,18 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(r3);  // Save result on the stack
+      PushOperand(r3);  // Save result on the stack
       result_saved = true;
     }
 
     __ LoadP(r3, MemOperand(sp));  // Duplicate receiver.
-    __ push(r3);
+    PushOperand(r3);
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1600,11 +1567,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
-            __ push(r3);
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1613,15 +1580,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ mov(r3, Operand(Smi::FromInt(NONE)));
-          __ push(r3);
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ mov(r3, Operand(Smi::FromInt(NONE)));
-          __ push(r3);
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1680,13 +1645,13 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(r3);
+      PushOperand(r3);
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1708,21 +1673,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(r3);
+    PopOperand(r3);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(r3);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(r3);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1763,11 +1723,11 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
         const Register scratch = r4;
         __ LoadP(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY: {
@@ -1778,11 +1738,11 @@
           property->obj()->AsSuperPropertyReference()->home_object());
       __ mr(scratch, result_register());
       VisitForAccumulatorValue(property->key());
-      __ Push(scratch, result_register());
+      PushOperands(scratch, result_register());
       if (expr->is_compound()) {
         const Register scratch1 = r5;
         __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
       }
       break;
     }
@@ -1830,7 +1790,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(r3);  // Left operand goes on the stack.
+    PushOperand(r3);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1894,8 +1854,16 @@
 
       __ b(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ b(&resume);
+      __ pop(r4);
+      __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+      __ bne(&resume);
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
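
The rewritten continuation makes the resume protocol explicit: the resumer
leaves the sent value in the result register and pushes the resume mode, so
the code above pops the mode, compares it against JSGeneratorObject::RETURN,
and either falls through to &resume for a normal .next() or boxes the value
into an iterator result and unwinds for a .return(). In outline:

    // On re-entry at &continuation (sketch of the code above):
    //   pop  r4                          ; resume mode, pushed by the resumer
    //   if (r4 != Smi(RETURN)) goto resume   ; .next(v): run the body
    //   push result_register()               ; v from .return(v)
    //   EmitCreateIteratorResult(true)       ; {value: v, done: true}
    //   EmitUnwindAndReturn()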
@@ -1914,7 +1882,7 @@
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1923,124 +1891,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
-      __ StoreP(r4, FieldMemOperand(result_register(),
-                                    JSGeneratorObject::kContinuationOffset),
-                r0);
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-      __ b(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
-      __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));        // iter
-      __ Push(load_name, r6, r3);  // "throw", iter, except
-      __ b(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(r3);  // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(r3);  // result
-
-      __ b(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ b(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ LoadP(r3, MemOperand(sp, generator_object_depth));
-      __ push(r3);  // g
-      __ Push(Smi::FromInt(handler_index));  // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
-      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
-                r0);
-      __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
-      __ mr(r4, cp);
-      __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
-                          kLRHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ pop(r3);  // result
-      EmitReturnSequence();
-      __ bind(&l_resume);  // received in r3
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ bind(&l_next);
-
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
-      __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));       // iter
-      __ Push(load_name, r6, r3);  // "next", iter, received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ LoadP(load_receiver, MemOperand(sp, kPointerSize));
-      __ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mr(r4, r3);
-      __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
-      SetCallPosition(expr);
-      __ li(r3, Operand(1));
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ Move(load_receiver, r3);
-
-      __ push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // r0=result.done
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ bne(&l_try);
-
-      // result.value
-      __ pop(load_receiver);                                 // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // r3=result.value
-      context()->DropAndPlug(2, r3);  // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
@@ -2054,7 +1913,14 @@
   // r4 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(r4);
+  PopOperand(r4);
+
+  // Store input value into generator object.
+  __ StoreP(result_register(),
+            FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
+  __ mr(r5, result_register());
+  __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r5, r6,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
@@ -2121,6 +1987,7 @@
                         Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
       __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
                 r0);
+      __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
       __ Jump(ip);
       __ bind(&slow_resume);
     }
@@ -2137,6 +2004,7 @@
   __ bdnz(&operand_loop);
 
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   DCHECK(!result_register().is(r4));
   __ Push(r4, result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2148,6 +2016,37 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+  OperandStackDepthIncrement(2);
+  __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3) {
+  OperandStackDepthIncrement(3);
+  __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+                                     Register reg3, Register reg4) {
+  OperandStackDepthIncrement(4);
+  __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+  OperandStackDepthDecrement(2);
+  __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    // The region between fp and sp must consist of the fixed frame plus
+    // exactly the operands tracked by the Push/Pop/DropOperand helpers.
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ sub(r3, fp, sp);
+    __ cmpi(r3, Operand(expected_diff));
+    __ Assert(eq, kUnexpectedStackDepth);
+  }
+}
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
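
Every push or pop of the expression operand stack now goes through one of these wrappers so that operand_stack_depth_ stays in sync with the machine stack, letting EmitOperandStackDepthCheck assert in debug code that fp - sp equals the fixed frame size plus depth * kPointerSize. The multi-register overloads above are the PPC-specific pieces; the single-operand helpers used throughout this file (PushOperand, PopOperand, DropOperands) presumably live in the shared full-codegen layer and follow the same shape, roughly (a sketch, not text from this patch):

    void FullCodeGenerator::PushOperand(Register reg) {
      OperandStackDepthIncrement(1);
      __ Push(reg);
    }

    void FullCodeGenerator::PopOperand(Register reg) {
      OperandStackDepthDecrement(1);
      __ Pop(reg);
    }

    void FullCodeGenerator::DropOperands(int count) {
      OperandStackDepthDecrement(count);
      __ Drop(count);
    }
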
@@ -2181,37 +2080,7 @@
   __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
   __ mov(LoadDescriptor::SlotRegister(),
          Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ mov(LoadDescriptor::SlotRegister(),
-         Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2227,7 +2096,7 @@
   // Get the arguments.
   Register left = r4;
   Register right = r3;
-  __ pop(left);
+  PopOperand(left);
 
   // Perform combined smi check on both operands.
   __ orx(scratch1, left, right);
@@ -2236,8 +2105,7 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ b(&done);
@@ -2342,27 +2210,17 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in r3.
-  DCHECK(lit != NULL);
-  __ push(r3);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = r4;
-  __ LoadP(scratch,
-           FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
-  __ push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
+    Register scratch = r4;
     if (property->is_static()) {
       __ LoadP(scratch, MemOperand(sp, kPointerSize));  // constructor
     } else {
       __ LoadP(scratch, MemOperand(sp, 0));  // prototype
     }
-    __ push(scratch);
+    PushOperand(scratch);
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
     // The static prototype property is read only. We handle the non computed
@@ -2385,36 +2243,31 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(r3);
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
-        __ push(r3);
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ pop(r4);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(r4);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);  // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2437,10 +2290,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(r3);  // Preserve value.
+      PushOperand(r3);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), r3);
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
       EmitLoadStoreICSlot(slot);
@@ -2448,7 +2301,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(r3);
+      PushOperand(r3);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2465,7 +2318,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(r3);
+      PushOperand(r3);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2485,12 +2338,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(r3);  // Preserve value.
+      PushOperand(r3);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), r3);
-      __ Pop(StoreDescriptor::ValueRegister(),
-             StoreDescriptor::ReceiverRegister());
+      PopOperands(StoreDescriptor::ValueRegister(),
+                  StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2573,17 +2426,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ push(r3);  // Value.
-      __ mov(r4, Operand(var->name()));
-      __ mov(r3, Operand(Smi::FromInt(language_mode())));
-      __ Push(cp, r4, r3);  // Context, name, language mode.
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(var->name());
+      __ Push(r3);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, r4);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ LoadP(r5, location);
         __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -2628,7 +2481,7 @@
 
   __ mov(StoreDescriptor::NameRegister(),
          Operand(prop->key()->AsLiteral()->value()));
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2645,10 +2498,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(r3);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(r3);
+  CallRuntimeWithOperands((is_strict(language_mode())
+                               ? Runtime::kStoreToSuper_Strict
+                               : Runtime::kStoreToSuper_Sloppy));
 }
 
 
@@ -2658,16 +2512,17 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(r3);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(r3);
+  CallRuntimeWithOperands((is_strict(language_mode())
+                               ? Runtime::kStoreKeyedToSuper_Strict
+                               : Runtime::kStoreKeyedToSuper_Sloppy));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
-  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  PopOperands(StoreDescriptor::ReceiverRegister(),
+              StoreDescriptor::NameRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(r3));
 
   Handle<Code> ic =
@@ -2702,7 +2557,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), r3);
-      __ pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2738,7 +2593,7 @@
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    __ push(r0);
+    PushOperand(r0);
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2749,7 +2604,7 @@
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
     __ LoadP(r0, MemOperand(sp, 0));
-    __ push(r0);
+    PushOperand(r0);
     __ StoreP(r3, MemOperand(sp, kPointerSize));
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2773,9 +2628,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ mr(scratch, r3);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, r3, r3, scratch);
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperands(scratch, r3, r3, scratch);
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
@@ -2783,8 +2637,7 @@
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2812,7 +2665,7 @@
 
   // Push the target function under the receiver.
   __ LoadP(ip, MemOperand(sp, 0));
-  __ push(ip);
+  PushOperand(ip);
   __ StoreP(r3, MemOperand(sp, kPointerSize));
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2832,9 +2685,8 @@
   VisitForAccumulatorValue(super_ref->home_object());
   __ mr(scratch, r3);
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(scratch, r3, r3, scratch);
+  PushOperands(scratch, r3, r3, scratch);
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2842,8 +2694,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2865,12 +2716,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
   // Restore context register.
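
Two bookkeeping details in this call path. For a tail call there is no point after the call to return to, so the profiling-counter work that the return sequence would normally perform has to happen before the call; passing true makes the helper (defined per port; the x64 version appears later in this patch) skip saving and restoring the result register, which a tail call does not need. And since the call consumes the receiver plus arg_count arguments, the tracked operand depth drops by arg_count + 1 here, while the function slot left underneath is presumably dropped by the context()->DropAndPlug(1, r3) that ends this function, as on the other ports. Roughly (a sketch of the accounting, not patch code):

    // Operand stack at the call site (top last):        tracked depth
    //   function                                        +1
    //   receiver                                        +1
    //   arg_0 .. arg_{arg_count-1}                      +arg_count
    // The IC pops receiver + arguments:
    OperandStackDepthDecrement(arg_count + 1);
    // The function slot is accounted for by DropAndPlug(1, r3) afterwards.
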
@@ -2915,11 +2777,9 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in r3) and
     // the object holding it (returned in r4).
-    DCHECK(!context_register().is(r5));
-    __ mov(r5, Operand(callee->name()));
-    __ Push(context_register(), r5);
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(r3, r4);  // Function, receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperands(r3, r4);  // Function, receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
@@ -2941,7 +2801,7 @@
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
-    __ push(r5);  // Reserved receiver slot.
+    PushOperand(r5);  // Reserved receiver slot.
   }
 }
 
@@ -2975,7 +2835,10 @@
   SetCallPosition(expr);
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ mov(r3, Operand(arg_count));
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3016,6 +2879,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3036,7 +2900,7 @@
            FieldMemOperand(result_register(), HeapObject::kMapOffset));
   __ LoadP(result_register(),
            FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  __ Push(result_register());
+  PushOperand(result_register());
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -3058,6 +2922,7 @@
   __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -3110,87 +2975,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r3, if_false);
-  __ CompareObjectType(r3, r4, r4, SIMD128_VALUE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r3, if_false);
-  __ CompareObjectType(r3, r4, r5, FIRST_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(ge, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
-  __ LoadP(r4, FieldMemOperand(r3, HeapNumber::kValueOffset));
-  __ li(r5, Operand(1));
-  __ rotrdi(r5, r5, 1);  // r5 = 0x80000000_00000000
-  __ cmp(r4, r5);
-#else
-  __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
-  __ lwz(r4, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-  Label skip;
-  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
-  __ cmp(r5, r0);
-  __ bne(&skip);
-  __ cmpi(r4, Operand::Zero());
-  __ bind(&skip);
-#endif
-
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3279,66 +3063,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ pop(r4);
-  __ cmp(r3, r4);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in r4 and the formal
-  // parameter count in r3.
-  VisitForAccumulatorValue(args->at(0));
-  __ mr(r4, r3);
-  __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-  Label exit;
-  // Get the number of formal parameters.
-  __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3409,28 +3133,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r3, if_false);
-  __ CompareObjectType(r3, r4, r4, JS_DATE_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3442,7 +3144,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ TestIfSmi(value, r0);
@@ -3474,7 +3176,7 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(index, value);
+  PopOperands(index, value);
 
   if (FLAG_debug_code) {
     __ TestIfSmi(value, r0);
@@ -3495,34 +3197,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(0));        // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(r4);                             // r3 = value. r4 = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(r4, &done);
-
-  // If the object is not a value type, return the value.
-  __ CompareObjectType(r4, r5, r5, JS_VALUE_TYPE);
-  __ bne(&done);
-
-  // Store the value.
-  __ StoreP(r3, FieldMemOperand(r4, JSValue::kValueOffset), r0);
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mr(r5, r3);
-  __ RecordWriteField(r4, JSValue::kValueOffset, r5, r6, kLRHasBeenSaved,
-                      kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3540,26 +3214,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into r3 and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  Label convert, done_convert;
-  __ JumpIfSmi(r3, &convert);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
-  __ ble(&done_convert);
-  __ bind(&convert);
-  __ Push(r3);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3588,7 +3242,7 @@
   Register index = r3;
   Register result = r6;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3630,7 +3284,7 @@
   Register scratch = r6;
   Register result = r3;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3675,6 +3329,7 @@
   // Call the target.
   __ mov(r3, Operand(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3729,261 +3384,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator, non_trivial_array,
-      not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
-      one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  VisitForStackValue(args->at(1));
-  VisitForAccumulatorValue(args->at(0));
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = r3;
-  Register elements = no_reg;  // Will be r3.
-  Register result = no_reg;    // Will be r3.
-  Register separator = r4;
-  Register array_length = r5;
-  Register result_pos = no_reg;  // Will be r5
-  Register string_length = r6;
-  Register string = r7;
-  Register element = r8;
-  Register elements_end = r9;
-  Register scratch1 = r10;
-  Register scratch2 = r11;
-
-  // Separator operand is on the stack.
-  __ pop(separator);
-
-  // Check that the array is a JSArray.
-  __ JumpIfSmi(array, &bailout);
-  __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
-  __ bne(&bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ LoadP(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ cmpi(array_length, Operand::Zero());
-  __ bne(&non_trivial_array);
-  __ LoadRoot(r3, Heap::kempty_stringRootIndex);
-  __ b(&done);
-
-  __ bind(&non_trivial_array);
-
-  // Get the FixedArray containing array's elements.
-  elements = array;
-  __ LoadP(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-  array = no_reg;  // End of array's live range.
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ li(string_length, Operand::Zero());
-  __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
-  __ add(elements_end, element, elements_end);
-  // Loop condition: while (element < elements_end).
-  // Live values in registers:
-  //   elements: Fixed array of strings.
-  //   array_length: Length of the fixed array of strings (not smi)
-  //   separator: Separator string
-  //   string_length: Accumulated sum of string lengths (smi).
-  //   element: Current array element.
-  //   elements_end: Array end.
-  if (generate_debug_code_) {
-    __ cmpi(array_length, Operand::Zero());
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ LoadP(string, MemOperand(element));
-  __ addi(element, element, Operand(kPointerSize));
-  __ JumpIfSmi(string, &bailout);
-  __ LoadP(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-  __ LoadP(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-
-  __ AddAndCheckForOverflow(string_length, string_length, scratch1, scratch2,
-                            r0);
-  __ BranchOnOverflow(&bailout);
-
-  __ cmp(element, elements_end);
-  __ blt(&loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmpi(array_length, Operand(1));
-  __ bne(&not_size_one_array);
-  __ LoadP(r3, FieldMemOperand(elements, FixedArray::kHeaderSize));
-  __ b(&done);
-
-  __ bind(&not_size_one_array);
-
-  // Live values in registers:
-  //   separator: Separator string
-  //   array_length: Length of the array.
-  //   string_length: Sum of string lengths (smi).
-  //   elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ JumpIfSmi(separator, &bailout);
-  __ LoadP(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
-  // Add (separator length times array_length) - separator length to the
-  // string_length to get the length of the result string.
-  __ LoadP(scratch1,
-           FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, string_length, scratch1);
-#if V8_TARGET_ARCH_PPC64
-  __ SmiUntag(scratch1, scratch1);
-  __ Mul(scratch2, array_length, scratch1);
-  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
-  // zero.
-  __ ShiftRightImm(ip, scratch2, Operand(31), SetRC);
-  __ bne(&bailout, cr0);
-  __ SmiTag(scratch2, scratch2);
-#else
-  // array_length is not smi but the other values are, so the result is a smi
-  __ mullw(scratch2, array_length, scratch1);
-  __ mulhw(ip, array_length, scratch1);
-  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
-  // zero.
-  __ cmpi(ip, Operand::Zero());
-  __ bne(&bailout);
-  __ cmpwi(scratch2, Operand::Zero());
-  __ blt(&bailout);
-#endif
-
-  __ AddAndCheckForOverflow(string_length, string_length, scratch2, scratch1,
-                            r0);
-  __ BranchOnOverflow(&bailout);
-  __ SmiUntag(string_length);
-
-  // Bailout for large object allocations.
-  __ Cmpi(string_length, Operand(Page::kMaxRegularHeapObjectSize), r0);
-  __ bgt(&bailout);
-
-  // Get first element in the array to free up the elements register to be used
-  // for the result.
-  __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  result = elements;  // End of live range for elements.
-  elements = no_reg;
-  // Live values in registers:
-  //   element: First array element
-  //   separator: Separator string
-  //   string_length: Length of result string (not smi)
-  //   array_length: Length of the array.
-  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
-                           elements_end, &bailout);
-  // Prepare for looping. Set up elements_end to end of the array. Set
-  // result_pos to the position of the result where to write the first
-  // character.
-  __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
-  __ add(elements_end, element, elements_end);
-  result_pos = array_length;  // End of live range for array_length.
-  array_length = no_reg;
-  __ addi(result_pos, result,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
-  // Check the length of the separator.
-  __ LoadP(scratch1,
-           FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ CmpSmiLiteral(scratch1, Smi::FromInt(1), r0);
-  __ beq(&one_char_separator);
-  __ bgt(&long_separator);
-
-  // Empty separator case
-  __ bind(&empty_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-
-  // Copy next array element to the result.
-  __ LoadP(string, MemOperand(element));
-  __ addi(element, element, Operand(kPointerSize));
-  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ addi(string, string,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  __ cmp(element, elements_end);
-  __ blt(&empty_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r3));
-  __ b(&done);
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ lbz(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ b(&one_char_separator_loop_entry);
-
-  __ bind(&one_char_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Single separator one-byte char (in lower byte).
-
-  // Copy the separator character to the result.
-  __ stb(separator, MemOperand(result_pos));
-  __ addi(result_pos, result_pos, Operand(1));
-
-  // Copy next array element to the result.
-  __ bind(&one_char_separator_loop_entry);
-  __ LoadP(string, MemOperand(element));
-  __ addi(element, element, Operand(kPointerSize));
-  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ addi(string, string,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  __ cmpl(element, elements_end);
-  __ blt(&one_char_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r3));
-  __ b(&done);
-
-  // Long separator case (separator is more than one character). Entry is at the
-  // label long_separator below.
-  __ bind(&long_separator_loop);
-  // Live values in registers:
-  //   result_pos: the position to which we are currently copying characters.
-  //   element: Current array element.
-  //   elements_end: Array end.
-  //   separator: Separator string.
-
-  // Copy the separator to the result.
-  __ LoadP(string_length, FieldMemOperand(separator, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ addi(string, separator,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-
-  __ bind(&long_separator);
-  __ LoadP(string, MemOperand(element));
-  __ addi(element, element, Operand(kPointerSize));
-  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
-  __ SmiUntag(string_length);
-  __ addi(string, string,
-          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
-  __ cmpl(element, elements_end);
-  __ blt(&long_separator_loop);  // End while (element < elements_end).
-  DCHECK(result.is(r3));
-  __ b(&done);
-
-  __ bind(&bailout);
-  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ bind(&done);
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -4016,7 +3416,7 @@
   __ b(&done);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(r3);
@@ -4026,7 +3426,7 @@
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as the receiver.
   __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ push(r3);
+  PushOperand(r3);
 
   __ LoadNativeContextSlot(expr->context_index(), r3);
 }
@@ -4041,6 +3441,7 @@
   __ mov(r3, Operand(arg_count));
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -4054,7 +3455,7 @@
 
     // Push the target function under the receiver.
     __ LoadP(ip, MemOperand(sp, 0));
-    __ push(ip);
+    PushOperand(ip);
     __ StoreP(r3, MemOperand(sp, kPointerSize));
 
     // Push the arguments ("left-to-right").
@@ -4090,6 +3491,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(r3);
       }
     }
@@ -4107,9 +3509,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(r3);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4130,9 +3532,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          DCHECK(!context_register().is(r5));
-          __ mov(r5, Operand(var->name()));
-          __ Push(context_register(), r5);
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(r3);
         }
@@ -4173,6 +3573,7 @@
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(), &materialize_false,
                         &materialize_true, &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         __ LoadRoot(r3, Heap::kTrueValueRootIndex);
@@ -4223,7 +3624,7 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ LoadSmiLiteral(ip, Smi::FromInt(0));
-      __ push(ip);
+      PushOperand(ip);
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4238,10 +3639,10 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
+        PushOperand(result_register());
         const Register scratch = r4;
         __ LoadP(scratch, MemOperand(sp, kPointerSize));
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4254,9 +3655,9 @@
         const Register scratch1 = r5;
         __ mr(scratch, result_register());
         VisitForAccumulatorValue(prop->key());
-        __ Push(scratch, result_register());
+        PushOperands(scratch, result_register());
         __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ Push(scratch1, scratch, result_register());
+        PushOperands(scratch1, scratch, result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4343,7 +3744,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(r3);
+          PushOperand(r3);
           break;
         case NAMED_PROPERTY:
           __ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -4367,8 +3768,7 @@
 
   SetExpressionPosition(expr);
 
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
-                                              strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4402,7 +3802,7 @@
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::NameRegister(),
              Operand(prop->key()->AsLiteral()->value()));
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4438,8 +3838,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(StoreDescriptor::ReceiverRegister(),
-             StoreDescriptor::NameRegister());
+      PopOperands(StoreDescriptor::ReceiverRegister(),
+                  StoreDescriptor::NameRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4495,8 +3895,8 @@
     __ CompareRoot(r3, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
-    __ beq(if_true);
+    __ CompareRoot(r3, Heap::kNullValueRootIndex);
+    __ beq(if_false);
     __ JumpIfSmi(r3, if_false);
     // Check for undetectable objects => true.
     __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
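
The flipped comparison is deliberate: in this revision the undefined oddball is recognized through the undetectable bit on its map (checked just below), and null's map carries the same bit, so null has to be filtered out first to keep typeof null == "object". A model of the emitted check (a sketch; the struct is illustrative):

    struct Value {
      bool is_null;
      bool is_smi;
      bool map_is_undetectable;  // undefined and document.all-style objects
    };

    bool TypeofIsUndefined(const Value& v) {
      if (v.is_null) return false;   // CompareRoot(kNullValueRootIndex) -> if_false
      if (v.is_smi) return false;    // JumpIfSmi -> if_false
      return v.map_is_undetectable;  // the map test that follows
    }
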
@@ -4563,7 +3963,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r3, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -4571,7 +3971,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ pop(r4);
+      PopOperand(r4);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4583,7 +3983,7 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cond = CompareIC::ComputeCondition(op);
-      __ pop(r4);
+      PopOperand(r4);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4596,8 +3996,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4681,7 +4080,7 @@
     DCHECK(closure_scope->is_function_scope());
     __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
-  __ push(ip);
+  PushOperand(ip);
 }
 
 
@@ -4690,23 +4089,12 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   DCHECK(!result_register().is(r4));
-  // Store result register while executing finally block.
-  __ push(result_register());
-  // Cook return address in link register to stack (smi encoded Code* delta)
-  __ mflr(r4);
-  __ mov(ip, Operand(masm_->CodeObject()));
-  __ sub(r4, r4, ip);
-  __ SmiTag(r4);
-
-  // Store result register while executing finally block.
-  __ push(r4);
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(ip, Operand(pending_message_obj));
   __ LoadP(r4, MemOperand(ip));
-  __ push(r4);
+  PushOperand(r4);
 
   ClearPendingMessage();
 }
@@ -4715,22 +4103,11 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(r4));
   // Restore pending message from stack.
-  __ pop(r4);
+  PopOperand(r4);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(ip, Operand(pending_message_obj));
   __ StoreP(r4, MemOperand(ip));
-
-  // Restore result register from stack.
-  __ pop(r4);
-
-  // Uncook return address and return.
-  __ pop(result_register());
-  __ SmiUntag(r4);
-  __ mov(ip, Operand(masm_->CodeObject()));
-  __ add(ip, ip, r4);
-  __ mtctr(ip);
-  __ bctr();
 }
 
 
@@ -4750,6 +4127,32 @@
          Operand(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(r4));
+  // Restore the accumulator (r3) and token (r4).
+  __ Pop(r4, result_register());
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ CmpSmiLiteral(r4, Smi::FromInt(cmd.token), r0);
+    __ bne(&skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
 
 #undef __
 
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 615eb67..910b2cf 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -16,8 +16,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 class JumpPatchSite BASE_EMBEDDED {
  public:
@@ -67,6 +66,7 @@
     __ j(cc, target, near_jump);
   }
 
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
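
These two hunks work together: the __ macro now expands through a masm() call instead of naming the masm_ field directly, and JumpPatchSite gains a masm() accessor, so the one macro definition serves both FullCodeGenerator and JumpPatchSite. A minimal standalone illustration of the pattern (a sketch, not patch code):

    #include <cstdio>

    #define EMIT(s) masm()->Emit(s)  // stand-in for ACCESS_MASM(masm())

    struct MacroAssembler {
      void Emit(const char* s) { std::printf("emit %s\n", s); }
    };

    struct CodeGen {
      MacroAssembler* masm() { return &masm_; }
      void Generate() { EMIT("from CodeGen"); }    // CodeGen::masm()
      MacroAssembler masm_;
    };

    struct PatchSite {
      explicit PatchSite(MacroAssembler* m) : masm_(m) {}
      MacroAssembler* masm() { return masm_; }
      void Patch() { EMIT("from PatchSite"); }     // same macro, other masm()
      MacroAssembler* masm_;
    };

    int main() {
      CodeGen cg;
      cg.Generate();
      PatchSite ps(cg.masm());
      ps.Patch();
    }
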
@@ -98,13 +98,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
     __ movp(rcx, args.GetReceiverOperand());
@@ -125,6 +118,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ PushRoot(Heap::kUndefinedValueRootIndex);
     } else if (locals_count > 1) {
@@ -254,21 +248,12 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ Move(RestParamAccessDescriptor::parameter_count(),
-            Smi::FromInt(num_parameters));
-    __ leap(RestParamAccessDescriptor::parameter_pointer(),
-            Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
-    __ Move(RestParamAccessDescriptor::rest_parameter_index(),
-            Smi::FromInt(rest_index));
-    function_in_register = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register) {
+      __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
-
+    function_in_register = false;
     SetVar(rest_param, rax, rbx, rdx);
   }
 
@@ -278,27 +263,19 @@
     // Arguments object must be allocated after the context object, in
     // case the "arguments" or ".arguments" variables are in the context.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register) {
       __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // The receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ Move(ArgumentsAccessNewDescriptor::parameter_count(),
-            Smi::FromInt(num_parameters));
-    __ leap(ArgumentsAccessNewDescriptor::parameter_pointer(),
-            Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(rdi);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, rax, rbx, rdx);
   }
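
This hunk and the rest-parameter hunk above replace the ArgumentsAccessStub/RestParamAccessStub machinery, which needed the parameter count and a parameter pointer computed inline, with dedicated stubs that only take the closure in rdi. The choice of allocation path reduces to a three-way dispatch (a model of the logic above; the enum names are illustrative, the predicates are the ones in the patch):

    enum ArgumentsPath { kFastStrictStub, kGenericRuntime, kFastSloppyStub };

    ArgumentsPath ChooseArgumentsPath(bool strict_mode, bool simple_parameters,
                                      bool duplicate_parameters) {
      // Strict functions and functions with non-simple parameter lists
      // (defaults, rest, destructuring) get an unmapped arguments object.
      if (strict_mode || !simple_parameters) return kFastStrictStub;
      // Duplicate parameter names only occur in sloppy mode and fall back to
      // Runtime::kNewSloppyArguments_Generic.
      if (duplicate_parameters) return kGenericRuntime;
      // The common sloppy case gets the fast mapped-arguments stub.
      return kFastSloppyStub;
    }
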
@@ -405,6 +382,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ j(positive, &ok, Label::kNear);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ Push(rax);
+  }
+  __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ Pop(rax);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
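
Factoring the counter handling out lets the tail-call path in EmitCall run the same interrupt check as a real return; the is_tail_call flag only controls whether rax is saved around the InterruptCheck call, since nothing survives a tail call. The weight scales with the function's code size, saturating at kMaxBackEdgeWeight (a model of the computation above; the constants in the example are illustrative, not this build's values):

    #include <algorithm>

    int BackEdgeWeight(int pc_offset, int code_size_multiplier, int max_weight) {
      return std::min(max_weight, std::max(1, pc_offset / code_size_multiplier));
    }

    // e.g. BackEdgeWeight(2420, 121, 127) == 20: bigger function bodies
    // decrement the profiling counter harder per exit.
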
@@ -416,24 +417,7 @@
       __ Push(rax);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ j(positive, &ok, Label::kNear);
-    __ Push(rax);
-    __ call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ Pop(rax);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     SetReturnPosition(literal());
     __ leave();
@@ -448,7 +432,7 @@
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
-  __ Push(operand);
+  codegen()->PushOperand(operand);
 }
 
 
@@ -464,6 +448,7 @@
 
 void FullCodeGenerator::StackValueContext::Plug(
     Heap::RootListIndex index) const {
+  codegen()->OperandStackDepthIncrement(1);
   __ PushRoot(index);
 }
 
@@ -501,6 +486,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  codegen()->OperandStackDepthIncrement(1);
   if (lit->IsSmi()) {
     __ SafePush(Smi::cast(*lit));
   } else {
@@ -514,7 +500,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -539,41 +525,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ movp(Operand(rsp, 0), reg);
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -597,6 +556,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
+  codegen()->OperandStackDepthIncrement(1);
   Label done;
   __ bind(materialize_true);
   __ Push(isolate()->factory()->true_value());
@@ -622,6 +582,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  codegen()->OperandStackDepthIncrement(1);
   Heap::RootListIndex value_root_index =
       flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
   __ PushRoot(value_root_index);
@@ -743,7 +704,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
     __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
@@ -859,10 +820,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ Push(variable->name());
+      PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -936,8 +897,8 @@
 
     // Record position before stub call for type feedback.
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -959,7 +920,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_label());
   } else {
@@ -990,24 +951,21 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &exit);
-  Register null_value = rdi;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ cmpp(rax, null_value);
-  __ j(equal, &exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop; otherwise,
+  // convert it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(rax, &convert, Label::kNear);
   __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
   __ j(above_equal, &done_convert, Label::kNear);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  __ j(equal, &exit);
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &exit);
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
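
The reordered prologue implements the for-in entry semantics: JS receivers
fall through untouched, null and undefined skip the loop body entirely, and
any other value is converted with ToObject. A hedged model of that decision
(the enums below are illustrative, not V8 types):

  enum class ValueKind { kNullOrUndefined, kReceiver, kSmi, kOtherPrimitive };
  enum class ForInEntry { kSkipLoop, kUseAsIs, kToObject };

  // Mirrors the checks above: receiver fast path first, then the
  // null/undefined exit, then the ToObject conversion for primitives.
  ForInEntry ClassifyForInEnumerable(ValueKind kind) {
    switch (kind) {
      case ValueKind::kReceiver:        return ForInEntry::kUseAsIs;
      case ValueKind::kNullOrUndefined: return ForInEntry::kSkipLoop;
      default:                          return ForInEntry::kToObject;
    }
  }
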
@@ -1015,16 +973,14 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ Push(rax);
 
-  // Check for proxies.
-  Label call_runtime;
-  __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
-  __ j(equal, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+  Label call_runtime;
+  __ CheckEnumCache(&call_runtime);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1035,7 +991,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ Push(rax);  // Duplicate the enumerable object on the stack.
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
 
   // If we got a map from the runtime call, we can do a fast
@@ -1074,8 +1030,8 @@
   __ bind(&fixed_array);
 
   // No need for a write barrier, we are storing a Smi in the feedback vector.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(rbx);
-  int vector_index = SmiFromSlot(slot)->value();
   __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
           TypeFeedbackVector::MegamorphicSentinel(isolate()));
   __ movp(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
@@ -1083,6 +1039,7 @@
   __ Push(rax);  // Array
   __ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ Push(rax);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ Push(Smi::FromInt(0));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1112,6 +1069,16 @@
   __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and only now notice in fullcodegen that we can
+  // no longer use the enum cache, i.e. we left fast mode. So we better record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function without rerunning the loop with the slow
+  // mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(rdx);
+  __ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
+          TypeFeedbackVector::MegamorphicSentinel(isolate()));
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
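
The feedback-vector store above latches the slot at the megamorphic sentinel
once the fast enum-cache path fails, so a later OSR or reoptimization
compiles the generic protocol instead of looping through deopts. A toy model
of that latch (the enum is illustrative, not the real TypeFeedbackVector
encoding):

  enum class ForInFeedback { kUninitialized, kFastEnumCache, kMegamorphic };

  // Once any iteration leaves fast mode, the slot stays megamorphic forever;
  // it never transitions back, which is what breaks the deopt loop.
  ForInFeedback UpdateForInFeedback(ForInFeedback current, bool fast_path_ok) {
    if (!fast_path_ok) return ForInFeedback::kMegamorphic;
    return current == ForInFeedback::kUninitialized
               ? ForInFeedback::kFastEnumCache
               : current;
  }
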
@@ -1149,6 +1116,7 @@
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
   __ addp(rsp, Immediate(5 * kPointerSize));
+  OperandStackDepthDecrement(ForIn::kElementCount);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1391,12 +1359,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ Push(rsi);  // Context.
       __ Push(var->name());
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(rax);
@@ -1421,6 +1388,7 @@
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
+    OperandStackDepthIncrement(1);
     __ PushRoot(Heap::kNullValueRootIndex);
   } else {
     VisitForStackValue(expression);
@@ -1469,7 +1437,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ Push(rax);  // Save result on the stack
+      PushOperand(rax);  // Save result on the stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1499,24 +1467,24 @@
           }
           break;
         }
-        __ Push(Operand(rsp, 0));  // Duplicate receiver.
+        PushOperand(Operand(rsp, 0));  // Duplicate receiver.
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
           if (NeedsHomeObject(value)) {
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
-          __ Push(Smi::FromInt(SLOPPY));  // Language mode
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(Smi::FromInt(SLOPPY));  // Language mode
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
-        __ Push(Operand(rsp, 0));  // Duplicate receiver.
+        PushOperand(Operand(rsp, 0));  // Duplicate receiver.
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1538,12 +1506,12 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end();
        ++it) {
-    __ Push(Operand(rsp, 0));  // Duplicate receiver.
+    PushOperand(Operand(rsp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
-    __ Push(Smi::FromInt(NONE));
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(Smi::FromInt(NONE));
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1560,17 +1528,17 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ Push(rax);  // Save result on the stack
+      PushOperand(rax);  // Save result on the stack
       result_saved = true;
     }
 
-    __ Push(Operand(rsp, 0));  // Duplicate receiver.
+    PushOperand(Operand(rsp, 0));  // Duplicate receiver.
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1585,10 +1553,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ Push(Smi::FromInt(NONE));
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1597,13 +1566,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ Push(Smi::FromInt(NONE));
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ Push(Smi::FromInt(NONE));
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1661,14 +1630,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ Push(rax);  // array literal
+      PushOperand(rax);  // array literal
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1689,21 +1658,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(rax);
+    PopOperand(rax);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(rax);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(rax);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1744,10 +1708,10 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ Push(MemOperand(rsp, kPointerSize));
-        __ Push(result_register());
+        PushOperand(MemOperand(rsp, kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case KEYED_SUPER_PROPERTY:
@@ -1756,11 +1720,11 @@
       VisitForStackValue(
           property->obj()->AsSuperPropertyReference()->home_object());
       VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ Push(MemOperand(rsp, 2 * kPointerSize));
-        __ Push(MemOperand(rsp, 2 * kPointerSize));
-        __ Push(result_register());
+        PushOperand(MemOperand(rsp, 2 * kPointerSize));
+        PushOperand(MemOperand(rsp, 2 * kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case KEYED_PROPERTY: {
@@ -1806,7 +1770,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ Push(rax);  // Left operand goes on the stack.
+    PushOperand(rax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     AccumulatorValueContext context(this);
@@ -1871,8 +1835,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ Pop(rbx);
+      __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
+      __ j(not_equal, &resume);
+      __ Push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1892,7 +1864,7 @@
               Operand(rbp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
 
-      __ Pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
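
With the resume mode now pushed by EmitGeneratorResume and popped at the
continuation, the control flow at a yield point boils down to the sketch
below (IterResult is a stand-in for the real iterator-result object;
kNext/kThrow simply fall through to the normal resume path):

  enum ResumeMode { kNext, kReturn, kThrow };  // as in JSGeneratorObject

  struct IterResult { int value; bool done; };

  // At the continuation label, TOS held the resume mode and the accumulator
  // held the input value; only kReturn is special-cased into a boxed,
  // done-marked result that unwinds and returns.
  IterResult AtYieldContinuation(ResumeMode mode, int input) {
    if (mode == kReturn) return {input, /*done=*/true};
    return {input, /*done=*/false};  // kNext/kThrow resume after the yield
  }
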
@@ -1901,131 +1873,21 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ Move(FieldOperand(result_register(),
-                           JSGeneratorObject::kContinuationOffset),
-              Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-      __ jmp(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
-      __ Push(load_name);
-      __ Push(Operand(rsp, 2 * kPointerSize));               // iter
-      __ Push(rax);                                          // exception
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ Pop(rax);                                       // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ Push(rax);                                      // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ movp(rax, Operand(rsp, generator_object_depth));
-      __ Push(rax);                                      // g
-      __ Push(Smi::FromInt(handler_index));              // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
-              Smi::FromInt(l_continuation.pos()));
-      __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
-      __ movp(rcx, rsi);
-      __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
-                          kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ movp(context_register(),
-              Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ Pop(rax);                                       // result
-      EmitReturnSequence();
-      __ bind(&l_resume);                                // received in rax
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = 'next'; arg = received;
-      __ bind(&l_next);
-
-      __ LoadRoot(load_name, Heap::knext_stringRootIndex);
-      __ Push(load_name);                           // "next"
-      __ Push(Operand(rsp, 2 * kPointerSize));      // iter
-      __ Push(rax);                                 // received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ movp(load_receiver, Operand(rsp, kPointerSize));
-      __ Move(LoadDescriptor::SlotRegister(),
-              SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ movp(rdi, rax);
-      __ movp(Operand(rsp, 2 * kPointerSize), rdi);
-
-      SetCallPosition(expr);
-      __ Set(rax, 1);
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ Move(load_receiver, rax);
-      __ Push(load_receiver);                               // save result
-      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
-      __ Move(LoadDescriptor::SlotRegister(),
-              SmiFromSlot(expr->DoneFeedbackSlot()));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // rax=result.done
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ j(not_equal, &l_try);
-
-      // result.value
-      __ Pop(load_receiver);                             // result
-      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
-      __ Move(LoadDescriptor::SlotRegister(),
-              SmiFromSlot(expr->ValueFeedbackSlot()));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                     // result.value in rax
-      context()->DropAndPlug(2, rax);                    // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
 
 
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
-    Expression *value,
+void FullCodeGenerator::EmitGeneratorResume(
+    Expression* generator, Expression* value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in rax, and is ultimately read by the resumed generator, as
   // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
@@ -2033,7 +1895,14 @@
   // rbx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ Pop(rbx);
+  PopOperand(rbx);
+
+  // Store input value into generator object.
+  __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset),
+          result_register());
+  __ movp(rcx, result_register());
+  __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rcx, rdx,
+                      kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
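
The RecordWriteField call after the kInputOffset store is the usual
generational write barrier: storing a heap pointer into the generator object
may create an old-to-new reference the GC has to know about. A minimal model
of that rule (Obj and the remembered set are illustrative, not V8's Heap
API):

  #include <unordered_set>

  struct Obj { bool in_new_space; Obj* field = nullptr; };

  std::unordered_set<Obj**> remembered_set;  // old->new slots the GC rescans

  // Plain store plus barrier: only old-space hosts pointing at new-space
  // values need to be remembered.
  void WriteField(Obj* host, Obj** slot, Obj* value) {
    *slot = value;
    if (value != nullptr && value->in_new_space && !host->in_new_space) {
      remembered_set.insert(slot);
    }
  }
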
@@ -2083,6 +1952,7 @@
     __ addp(rdx, rcx);
     __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
             Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ jmp(rdx);
     __ bind(&slow_resume);
   }
@@ -2096,6 +1966,7 @@
   __ Push(rcx);
   __ jmp(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   __ Push(rbx);
   __ Push(result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2107,6 +1978,21 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+  OperandStackDepthIncrement(1);
+  __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ movp(rax, rbp);
+    __ subp(rax, rsp);
+    __ cmpp(rax, Immediate(expected_diff));
+    __ Assert(equal, kUnexpectedStackDepth);
+  }
+}
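
EmitOperandStackDepthCheck asserts the invariant behind all the new
PushOperand/PopOperand bookkeeping: rbp minus rsp must equal the fixed frame
size plus the tracked operand depth in pointers. The same check written out
as plain arithmetic (the frame-size constant here is an assumption for
illustration):

  #include <cassert>
  #include <cstdint>

  constexpr intptr_t kPointerSize = 8;                          // x64
  constexpr intptr_t kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed

  // Equivalent of the emitted rbp - rsp comparison: every tracked operand
  // widens the distance between the frame pointer and the stack pointer.
  void CheckOperandStackDepth(intptr_t rbp, intptr_t rsp,
                              int operand_stack_depth) {
    assert(rbp - rsp ==
           kFixedFrameSizeFromFp + operand_stack_depth * kPointerSize);
  }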
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2134,42 +2020,13 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetExpressionPosition(prop);
   Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
   DCHECK(!prop->IsSuperAccess());
 
   __ Move(LoadDescriptor::NameRegister(), key->value());
   __ Move(LoadDescriptor::SlotRegister(),
           SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ Move(LoadDescriptor::SlotRegister(),
-          SmiFromSlot(prop->PropertyFeedbackSlot()));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ Push(Smi::FromInt(language_mode()));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2181,7 +2038,7 @@
   // stack (popped into rdx). Right operand is in rax but moved into
   // rcx to make the shifts easier.
   Label done, stub_call, smi_case;
-  __ Pop(rdx);
+  PopOperand(rdx);
   __ movp(rcx, rax);
   __ orp(rax, rdx);
   JumpPatchSite patch_site(masm_);
@@ -2189,8 +2046,7 @@
 
   __ bind(&stub_call);
   __ movp(rax, rcx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
@@ -2235,24 +2091,14 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in rax.
-  DCHECK(lit != NULL);
-  __ Push(rax);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = rbx;
-  __ movp(scratch, FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset));
-  __ Push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
-      __ Push(Operand(rsp, kPointerSize));  // constructor
+      PushOperand(Operand(rsp, kPointerSize));  // constructor
     } else {
-      __ Push(Operand(rsp, 0));  // prototype
+      PushOperand(Operand(rsp, 0));  // prototype
     }
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
@@ -2276,34 +2122,31 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ Push(Smi::FromInt(DONT_ENUM));
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ Push(Smi::FromInt(DONT_ENUM));
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
 
       default:
         UNREACHABLE();
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ Pop(rdx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(rdx);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2326,10 +2169,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ Push(rax);  // Preserve value.
+      PushOperand(rax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), rax);
-      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ Move(StoreDescriptor::NameRegister(),
               prop->key()->AsLiteral()->value());
       EmitLoadStoreICSlot(slot);
@@ -2337,7 +2180,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ Push(rax);
+      PushOperand(rax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2354,7 +2197,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ Push(rax);
+      PushOperand(rax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2374,12 +2217,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Push(rax);  // Preserve value.
+      PushOperand(rax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), rax);
-      __ Pop(StoreDescriptor::ReceiverRegister());
-      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2456,17 +2299,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ Push(rax);  // Value.
-      __ Push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(language_mode()));
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(rax);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, rcx);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ movp(rdx, location);
         __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2511,7 +2354,7 @@
   DCHECK(prop->key()->IsLiteral());
 
   __ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
-  __ Pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
 
@@ -2528,10 +2371,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ Push(key->value());
-  __ Push(rax);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(rax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2541,17 +2385,17 @@
   // stack : receiver ('this'), home_object, key
   DCHECK(prop != NULL);
 
-  __ Push(rax);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(rax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
-  __ Pop(StoreDescriptor::NameRegister());  // Key.
-  __ Pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::NameRegister());  // Key.
+  PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(rax));
   Handle<Code> ic =
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2586,7 +2430,7 @@
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
       __ Move(LoadDescriptor::NameRegister(), rax);
-      __ Pop(LoadDescriptor::ReceiverRegister());
+      PopOperand(LoadDescriptor::ReceiverRegister());
       EmitKeyedPropertyLoad(expr);
     } else {
       VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2621,7 +2465,7 @@
     }
     // Push undefined as receiver. This is patched in the Call builtin if it
     // is a sloppy mode method.
-    __ Push(isolate()->factory()->undefined_value());
+    PushOperand(isolate()->factory()->undefined_value());
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2631,7 +2475,7 @@
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
-    __ Push(Operand(rsp, 0));
+    PushOperand(Operand(rsp, 0));
     __ movp(Operand(rsp, kPointerSize), rax);
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2653,11 +2497,10 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(rax);
-  __ Push(rax);
-  __ Push(Operand(rsp, kPointerSize * 2));
-  __ Push(key->value());
-  __ Push(Smi::FromInt(language_mode()));
+  PushOperand(rax);
+  PushOperand(rax);
+  PushOperand(Operand(rsp, kPointerSize * 2));
+  PushOperand(key->value());
 
   // Stack here:
   //  - home_object
@@ -2665,8 +2508,7 @@
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ movp(Operand(rsp, kPointerSize), rax);
@@ -2694,7 +2536,7 @@
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
   // Push the target function under the receiver.
-  __ Push(Operand(rsp, 0));
+  PushOperand(Operand(rsp, 0));
   __ movp(Operand(rsp, kPointerSize), rax);
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2712,11 +2554,10 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ Push(rax);
-  __ Push(rax);
-  __ Push(Operand(rsp, kPointerSize * 2));
+  PushOperand(rax);
+  PushOperand(rax);
+  PushOperand(Operand(rsp, kPointerSize * 2));
   VisitForStackValue(prop->key());
-  __ Push(Smi::FromInt(language_mode()));
 
   // Stack here:
   //  - home_object
@@ -2724,8 +2565,7 @@
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ movp(Operand(rsp, kPointerSize), rax);
@@ -2747,12 +2587,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
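Because a tail call never returns to the caller's code object, the
profiling/interrupt bookkeeping has to run before the call is emitted; there
is no return site afterwards to hang it on. A hedged outline of the ordering
above (the Step enum and emit callback are illustrative, not the emitter
API):

  enum class Step { kTraceTailCall, kProfilingCounters, kCallIC };

  // Sketch of the emission order above; the point is only that the counter
  // update precedes the call, since control never comes back afterwards.
  void EmitTailCallSequence(bool trace_enabled, void (*emit)(Step)) {
    if (trace_enabled) emit(Step::kTraceTailCall);
    emit(Step::kProfilingCounters);  // must happen before the call
    emit(Step::kCallIC);
  }
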
@@ -2797,11 +2648,10 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in rax) and
     // the object holding it (returned in rdx).
-    __ Push(context_register());
     __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ Push(rax);  // Function.
-    __ Push(rdx);  // Receiver.
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperand(rax);  // Function.
+    PushOperand(rdx);  // Receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
@@ -2821,6 +2671,7 @@
   } else {
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
+    OperandStackDepthIncrement(1);
     __ PushRoot(Heap::kUndefinedValueRootIndex);
   }
 }
@@ -2852,7 +2703,10 @@
   SetCallPosition(expr);
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ Set(rax, arg_count);
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2893,6 +2747,7 @@
 
   CallConstructStub stub(isolate());
   __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2911,7 +2766,7 @@
   __ AssertFunction(result_register());
   __ movp(result_register(),
           FieldOperand(result_register(), HeapObject::kMapOffset));
-  __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2933,6 +2788,7 @@
   __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -2986,77 +2842,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(rax, if_false);
-  __ CmpObjectType(rax, SIMD128_VALUE_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(rax, if_false);
-  __ CmpObjectType(rax, FIRST_FUNCTION_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(above_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-  __ CheckMap(rax, map, if_false, DO_SMI_CHECK);
-  __ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
-          Immediate(0x1));
-  __ j(no_overflow, if_false);
-  __ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
-          Immediate(0x00000000));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3146,68 +2931,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ Pop(rbx);
-  __ cmpp(rax, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in rdx and the formal
-  // parameter count in rax.
-  VisitForAccumulatorValue(args->at(0));
-  __ movp(rdx, rax);
-  __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  Label exit;
-  // Get the number of formal parameters.
-  __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit, Label::kNear);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  __ AssertSmi(rax);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3276,28 +2999,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(rax, if_false);
-  __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3309,8 +3010,8 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(value);
-  __ Pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3342,8 +3043,8 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ Pop(value);
-  __ Pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3364,34 +3065,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ Pop(rbx);  // rax = value. rbx = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(rbx, &done);
-
-  // If the object is not a value type, return the value.
-  __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
-  __ j(not_equal, &done);
-
-  // Store the value.
-  __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ movp(rdx, rax);
-  __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3409,27 +3082,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into rax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to a name.
-  Label convert, done_convert;
-  __ JumpIfSmi(rax, &convert, Label::kNear);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(rax, LAST_NAME_TYPE, rcx);
-  __ j(below_equal, &done_convert, Label::kNear);
-  __ bind(&convert);
-  __ Push(rax);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3460,7 +3112,7 @@
   Register index = rax;
   Register result = rdx;
 
-  __ Pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3507,7 +3159,7 @@
   Register scratch = rdx;
   Register result = rax;
 
-  __ Pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3557,6 +3209,7 @@
   // Call the target.
   __ Set(rax, argc);
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3613,296 +3266,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, return_result, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  VisitForStackValue(args->at(1));
-  // Load this to rax (= array)
-  VisitForAccumulatorValue(args->at(0));
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = rax;
-  Register elements = no_reg;  // Will be rax.
-
-  Register index = rdx;
-
-  Register string_length = rcx;
-
-  Register string = rsi;
-
-  Register scratch = rbx;
-
-  Register array_length = rdi;
-  Register result_pos = no_reg;  // Will be rdi.
-
-  Operand separator_operand =    Operand(rsp, 2 * kPointerSize);
-  Operand result_operand =       Operand(rsp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
-  // Separator operand is already pushed. Make room for the two
-  // other stack fields, and clear the direction flag in anticipation
-  // of calling CopyBytes.
-  __ subp(rsp, Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray
-  __ JumpIfSmi(array, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch, &bailout);
-
-  // Array has fast elements, so its length must be a smi.
-  // If the array has length zero, return the empty string.
-  __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ SmiCompare(array_length, Smi::FromInt(0));
-  __ j(not_zero, &non_trivial_array);
-  __ LoadRoot(rax, Heap::kempty_stringRootIndex);
-  __ jmp(&return_result);
-
-  // Save the array length on the stack.
-  __ bind(&non_trivial_array);
-  __ SmiToInteger32(array_length, array_length);
-  __ movl(array_length_operand, array_length);
-
-  // Save the FixedArray containing array's elements.
-  // End of array's live range.
-  elements = array;
-  __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ Set(index, 0);
-  __ Set(string_length, 0);
-  // Loop condition: while (index < array_length).
-  // Live loop registers: index(int32), array_length(int32), string(String*),
-  //                      scratch, string_length(int32), elements(FixedArray*).
-  if (generate_debug_code_) {
-    __ cmpp(index, array_length);
-    __ Assert(below, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ movp(string, FieldOperand(elements,
-                               index,
-                               times_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ JumpIfSmi(string, &bailout);
-  __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ andb(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
-  __ j(not_equal, &bailout);
-  __ AddSmiField(string_length,
-                 FieldOperand(string, SeqOneByteString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ incl(index);
-  __ cmpl(index, array_length);
-  __ j(less, &loop);
-
-  // Live registers:
-  // string_length: Sum of string lengths.
-  // elements: FixedArray of strings.
-  // index: Array length.
-  // array_length: Array length.
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmpl(array_length, Immediate(1));
-  __ j(not_equal, &not_size_one_array);
-  __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ jmp(&return_result);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths.
-  // elements: FixedArray of strings.
-  // index: Array length.
-
-  // Check that the separator is a sequential one-byte string.
-  __ movp(string, separator_operand);
-  __ JumpIfSmi(string, &bailout);
-  __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ andb(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
-  __ j(not_equal, &bailout);
-
-  // Live registers:
-  // string_length: Sum of string lengths.
-  // elements: FixedArray of strings.
-  // index: Array length.
-  // string: Separator string.
-
-  // Add (separator length times (array_length - 1)) to string_length.
-  __ SmiToInteger32(scratch,
-                    FieldOperand(string, SeqOneByteString::kLengthOffset));
-  __ decl(index);
-  __ imull(scratch, index);
-  __ j(overflow, &bailout);
-  __ addl(string_length, scratch);
-  __ j(overflow, &bailout);
-  __ jmp(&bailout);
-
-  // Bailout for large object allocations.
-  __ cmpl(string_length, Immediate(Page::kMaxRegularHeapObjectSize));
-  __ j(greater, &bailout);
-
-  // Live registers and stack values:
-  //   string_length: Total length of result string.
-  //   elements: FixedArray of strings.
-  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
-                           &bailout);
-  __ movp(result_operand, result_pos);
-  __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-  __ movp(string, separator_operand);
-  __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
-                Smi::FromInt(1));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-
-  // Empty separator case:
-  __ Set(index, 0);
-  __ movl(scratch, array_length_operand);
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < array_length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-  //   scratch: array length.
-
-  // Get string = array[index].
-  __ movp(string, FieldOperand(elements, index,
-                               times_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ SmiToInteger32(string_length,
-                    FieldOperand(string, String::kLengthOffset));
-  __ leap(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(result_pos, string, string_length);
-  __ incl(index);
-  __ bind(&loop_1_condition);
-  __ cmpl(index, scratch);
-  __ j(less, &loop_1);  // Loop while (index < array_length).
-  __ jmp(&done);
-
-  // Generic bailout code used from several places.
-  __ bind(&bailout);
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ jmp(&return_result);
-
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Get the separator one-byte character value.
-  // Register "string" holds the separator.
-  __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ Set(index, 0);
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   elements: The FixedArray of strings we are joining.
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   scratch: Separator character.
-
-  // Copy the separator character to the result.
-  __ movb(Operand(result_pos, 0), scratch);
-  __ incp(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ movp(string, FieldOperand(elements, index,
-                               times_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ SmiToInteger32(string_length,
-                    FieldOperand(string, String::kLengthOffset));
-  __ leap(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(result_pos, string, string_length);
-  __ incl(index);
-  __ cmpl(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  // Make elements point to end of elements array, and index
-  // count from -array_length to zero, so we don't need to maintain
-  // a loop limit.
-  __ movl(index, array_length_operand);
-  __ leap(elements, FieldOperand(elements, index, times_pointer_size,
-                                FixedArray::kHeaderSize));
-  __ negq(index);
-
-  // Replace separator string with pointer to its first character, and
-  // make scratch be its length.
-  __ movp(string, separator_operand);
-  __ SmiToInteger32(scratch,
-                    FieldOperand(string, String::kLengthOffset));
-  __ leap(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ movp(separator_operand, string);
-
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   scratch: Separator length.
-  //   separator_operand (rsp[0x10]): Address of first char of separator.
-
-  // Copy the separator to the result.
-  __ movp(string, separator_operand);
-  __ movl(string_length, scratch);
-  __ CopyBytes(result_pos, string, string_length, 2);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ movp(string, Operand(elements, index, times_pointer_size, 0));
-  __ SmiToInteger32(string_length,
-                    FieldOperand(string, String::kLengthOffset));
-  __ leap(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(result_pos, string, string_length);
-  __ incq(index);
-  __ j(not_equal, &loop_3);  // Loop while (index < 0).
-
-  __ bind(&done);
-  __ movp(rax, result_operand);
-
-  __ bind(&return_result);
-  // Drop temp values from the stack, and restore context register.
-  __ addp(rsp, Immediate(3 * kPointerSize));
-  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  context()->Plug(rax);
-}
-
-
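
For orientation, the block deleted above was full-codegen's hand-written fast path for joining an array of sequential one-byte strings: it sums the element lengths (plus separator length times n - 1, with overflow checks), allocates the result string once, then runs one of three copy loops depending on whether the separator is empty, a single character, or longer. A rough C++ model of that control flow (names illustrative; the generated code worked on raw SeqOneByteString payloads, not std::string):

#include <string>
#include <vector>

std::string JoinOneByteStrings(const std::vector<std::string>& elements,
                               const std::string& separator) {
  if (elements.empty()) return std::string();
  if (elements.size() == 1) return elements[0];  // the size-one early return

  // First pass: total length, mirroring the length-summing loop and the
  // separator-length multiplication before AllocateOneByteString.
  size_t total = separator.size() * (elements.size() - 1);
  for (const std::string& s : elements) total += s.size();

  std::string result;
  result.reserve(total);  // single allocation, like AllocateOneByteString

  if (separator.empty()) {             // loop_1: plain concatenation
    for (const std::string& s : elements) result += s;
  } else if (separator.size() == 1) {  // loop_2: one byte store between parts
    for (size_t i = 0; i < elements.size(); ++i) {
      if (i > 0) result += separator[0];
      result += elements[i];
    }
  } else {                             // loop_3: block copy of the separator
    for (size_t i = 0; i < elements.size(); ++i) {
      if (i > 0) result += separator;
      result += elements[i];
    }
  }
  return result;
}
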
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3934,7 +3297,7 @@
   __ jmp(&done, Label::kNear);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(rax);
@@ -3943,6 +3306,7 @@
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
  // Push undefined as the receiver.
+  OperandStackDepthIncrement(1);
   __ PushRoot(Heap::kUndefinedValueRootIndex);
 
   __ LoadNativeContextSlot(expr->context_index(), rax);
@@ -3958,6 +3322,7 @@
   __ Set(rax, arg_count);
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
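
The mechanical change running through this whole patch is that raw `__ Push`/`__ Pop`/`__ Drop` calls become `PushOperand`/`PopOperand`/`DropOperands`, and call sites record how many operands a call consumes, so full-codegen can track the operand stack height statically. A minimal sketch of the bookkeeping pattern, assuming a plain counter like `operand_stack_depth_`:

// Sketch only: each stack operation is paired with a counter update;
// EmitOperandStackDepthCheck later compares the counter against the real
// frame-pointer/stack-pointer distance.
class OperandStackTracker {
 public:
  void PushOperand() { ++depth_; }               // wraps __ Push(...)
  void PopOperand() { --depth_; }                // wraps __ Pop(...)
  void DropOperands(int count) { depth_ -= count; }
  // A call consumes its pushed arguments (plus the receiver for JS calls).
  void OperandStackDepthDecrement(int count) { depth_ -= count; }
  void OperandStackDepthIncrement(int count) { depth_ += count; }
  int depth() const { return depth_; }

 private:
  int depth_ = 0;
};
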
 
 
@@ -3971,7 +3336,7 @@
     EmitLoadJSRuntimeFunction(expr);
 
     // Push the target function under the receiver.
-    __ Push(Operand(rsp, 0));
+    PushOperand(Operand(rsp, 0));
     __ movp(Operand(rsp, kPointerSize), rax);
 
     // Push the arguments ("left-to-right").
@@ -4006,6 +3371,7 @@
         // Call the C runtime.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(function, arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(rax);
       }
     }
@@ -4023,9 +3389,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(rax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4047,7 +3413,6 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          __ Push(context_register());
           __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(rax);
@@ -4093,6 +3458,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
@@ -4148,7 +3514,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ Push(Smi::FromInt(0));
+      PushOperand(Smi::FromInt(0));
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4162,9 +3528,9 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ Push(result_register());
-        __ Push(MemOperand(rsp, kPointerSize));
-        __ Push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(rsp, kPointerSize));
+        PushOperand(result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4174,10 +3540,10 @@
         VisitForStackValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
         VisitForAccumulatorValue(prop->key());
-        __ Push(result_register());
-        __ Push(MemOperand(rsp, 2 * kPointerSize));
-        __ Push(MemOperand(rsp, 2 * kPointerSize));
-        __ Push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(rsp, 2 * kPointerSize));
+        PushOperand(MemOperand(rsp, 2 * kPointerSize));
+        PushOperand(result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4266,7 +3632,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ Push(rax);
+          PushOperand(rax);
           break;
         case NAMED_PROPERTY:
           __ movp(Operand(rsp, kPointerSize), rax);
@@ -4290,8 +3656,8 @@
   __ bind(&stub_call);
   __ movp(rdx, rax);
   __ Move(rax, Smi::FromInt(1));
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
-                                              strength(language_mode())).code();
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4326,7 +3692,7 @@
     case NAMED_PROPERTY: {
       __ Move(StoreDescriptor::NameRegister(),
               prop->key()->AsLiteral()->value());
-      __ Pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4362,8 +3728,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(StoreDescriptor::NameRegister());
-      __ Pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::NameRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4417,8 +3783,8 @@
     __ CompareRoot(rax, Heap::kFalseValueRootIndex);
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-    __ j(equal, if_true);
+    __ CompareRoot(rax, Heap::kNullValueRootIndex);
+    __ j(equal, if_false);
     __ JumpIfSmi(rax, if_false);
     // Check for undetectable objects => true.
     __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
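
The rewritten check inverts the old logic: rather than accepting the undefined value directly, it rejects null first, rejects Smis, and then answers through the map's undetectable bit, which covers both undefined itself and undetectable objects such as document.all. Null needs the explicit early exclusion because in this V8 version null's map is also marked undetectable (for abstract equality with document.all), yet typeof null must stay "object". As a truth table:

// value === null            -> if_false  (typeof null === "object", even
//                                         though null's map is undetectable)
// value is a Smi            -> if_false  ("number")
// map has undetectable bit  -> if_true   (undefined, document.all)
// anything else             -> if_false
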
@@ -4484,7 +3850,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
@@ -4492,7 +3858,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ Pop(rdx);
+      PopOperand(rdx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4504,7 +3870,7 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = CompareIC::ComputeCondition(op);
-      __ Pop(rdx);
+      PopOperand(rdx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4518,8 +3884,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4598,15 +3963,15 @@
     // as their closure, not the anonymous closure containing the global
     // code.
     __ movp(rax, NativeContextOperand());
-    __ Push(ContextOperand(rax, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(rax, Context::CLOSURE_INDEX));
   } else if (closure_scope->is_eval_scope()) {
     // Contexts created by a call to eval have the same closure as the
     // context calling eval, not the anonymous closure containing the eval
     // code.  Fetch it from the context.
-    __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(rsi, Context::CLOSURE_INDEX));
   } else {
     DCHECK(closure_scope->is_function_scope());
-    __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    PushOperand(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
 
@@ -4617,22 +3982,12 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   DCHECK(!result_register().is(rdx));
-  DCHECK(!result_register().is(rcx));
-  // Cook return address on top of stack (smi encoded Code* delta)
-  __ PopReturnAddressTo(rdx);
-  __ Move(rcx, masm_->CodeObject());
-  __ subp(rdx, rcx);
-  __ Integer32ToSmi(rdx, rdx);
-  __ Push(rdx);
-
-  // Store result register while executing finally block.
-  __ Push(result_register());
 
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ Load(rdx, pending_message_obj);
-  __ Push(rdx);
+  PushOperand(rdx);
 
   ClearPendingMessage();
 }
@@ -4640,22 +3995,11 @@
 
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(rdx));
-  DCHECK(!result_register().is(rcx));
   // Restore pending message from stack.
-  __ Pop(rdx);
+  PopOperand(rdx);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ Store(pending_message_obj, rdx);
-
-  // Restore result register from stack.
-  __ Pop(result_register());
-
-  // Uncook return address.
-  __ Pop(rdx);
-  __ SmiToInteger32(rdx, rdx);
-  __ Move(rcx, masm_->CodeObject());
-  __ addp(rdx, rcx);
-  __ jmp(rdx);
 }
 
 
@@ -4673,6 +4017,31 @@
   __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(rdx);                // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ SmiCompare(rdx, Smi::FromInt(cmd.token));
+    __ j(not_equal, &skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
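
This EmitCommands dispatch is the replacement for the return-address "cooking" deleted from EnterFinallyBlock/ExitFinallyBlock above: each completion that has to traverse a finally block is recorded as a (command, token) pair, and on exit the token left on the stack selects which pending action to resume. A plain-C++ model of the dispatch, under the assumption that tokens are small unique integers:

#include <vector>

enum Command { kReturn, kThrow, kContinue, kBreak };
struct DeferredCommand {
  Command command;
  int token;  // pushed when control entered the finally block
  // (the real struct also carries a jump target for kContinue/kBreak)
};

// Returns true and sets *out if a recorded command matches the token;
// returning false models falling through to the code after the block.
bool DispatchDeferred(const std::vector<DeferredCommand>& commands,
                      int token, Command* out) {
  for (const DeferredCommand& cmd : commands) {
    if (cmd.token != token) continue;  // __ j(not_equal, &skip)
    *out = cmd.command;
    return true;
  }
  return false;
}
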
 
 #undef __
 
@@ -4753,7 +4122,6 @@
   return OSR_AFTER_STACK_CHECK;
 }
 
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index c38230a..36b7c5d 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -17,8 +17,7 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
 
 class JumpPatchSite BASE_EMBEDDED {
  public:
@@ -68,6 +67,7 @@
     __ j(cc, target, distance);
   }
 
+  MacroAssembler* masm() { return masm_; }
   MacroAssembler* masm_;
   Label patch_site_;
 #ifdef DEBUG
@@ -99,13 +99,6 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
-    __ int3();
-  }
-#endif
-
   if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
     __ mov(ecx, Operand(esp, receiver_offset));
@@ -126,6 +119,7 @@
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
     DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
+    OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
     } else if (locals_count > 1) {
@@ -256,48 +250,33 @@
   Variable* rest_param = scope()->rest_parameter(&rest_index);
   if (rest_param) {
     Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-
-    __ mov(RestParamAccessDescriptor::parameter_count(),
-           Immediate(Smi::FromInt(num_parameters)));
-    __ lea(RestParamAccessDescriptor::parameter_pointer(),
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-    __ mov(RestParamAccessDescriptor::rest_parameter_index(),
-           Immediate(Smi::FromInt(rest_index)));
-    function_in_register = false;
-
-    RestParamAccessStub stub(isolate());
+    if (!function_in_register) {
+      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    }
+    FastNewRestParameterStub stub(isolate());
     __ CallStub(&stub);
+    function_in_register = false;
     SetVar(rest_param, eax, ebx, edx);
   }
 
   Variable* arguments = scope()->arguments();
   if (arguments != NULL) {
-    // Function uses arguments object.
+    // Arguments object must be allocated after the context object, in
+    // case the "arguments" or ".arguments" variables are in the context.
     Comment cmnt(masm_, "[ Allocate arguments object");
-    DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
     if (!function_in_register) {
       __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     }
-    // Receiver is just before the parameters on the caller's stack.
-    int num_parameters = info->scope()->num_parameters();
-    int offset = num_parameters * kPointerSize;
-    __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
-           Immediate(Smi::FromInt(num_parameters)));
-    __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
-           Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-
-    // Arguments to ArgumentsAccessStub:
-    //   function, parameter pointer, parameter count.
-    // The stub will rewrite parameter pointer and parameter count if the
-    // previous stack frame was an arguments adapter frame.
-    bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
-    ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
-        is_unmapped, literal()->has_duplicate_parameters());
-    ArgumentsAccessStub stub(isolate(), type);
-    __ CallStub(&stub);
+    if (is_strict(language_mode()) || !has_simple_parameters()) {
+      FastNewStrictArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    } else if (literal()->has_duplicate_parameters()) {
+      __ Push(edi);
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+    } else {
+      FastNewSloppyArgumentsStub stub(isolate());
+      __ CallStub(&stub);
+    }
 
     SetVar(arguments, eax, ebx, edx);
   }
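
The old ArgumentsAccessStub protocol, which passed the parameter count and a parameter pointer explicitly, is replaced by three specialized paths that only need the closure in edi. The selection logic, modeled in C++ with the names used in the patch:

enum ArgumentsPath {
  kFastNewStrictArguments,  // FastNewStrictArgumentsStub
  kGenericSloppyArguments,  // Runtime::kNewSloppyArguments_Generic
  kFastNewSloppyArguments   // FastNewSloppyArgumentsStub
};

ArgumentsPath SelectArgumentsPath(bool strict_mode, bool simple_parameters,
                                  bool has_duplicate_parameters) {
  if (strict_mode || !simple_parameters) return kFastNewStrictArguments;
  if (has_duplicate_parameters) return kGenericSloppyArguments;
  return kFastNewSloppyArguments;
}
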
@@ -398,6 +377,30 @@
   PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
 }
 
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+    bool is_tail_call) {
+  // Pretend that the exit is a backwards jump to the entry.
+  int weight = 1;
+  if (info_->ShouldSelfOptimize()) {
+    weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+  } else {
+    int distance = masm_->pc_offset();
+    weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+  }
+  EmitProfilingCounterDecrement(weight);
+  Label ok;
+  __ j(positive, &ok, Label::kNear);
+  // Don't need to save result register if we are going to do a tail call.
+  if (!is_tail_call) {
+    __ push(eax);
+  }
+  __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  if (!is_tail_call) {
+    __ pop(eax);
+  }
+  EmitProfilingCounterReset();
+  __ bind(&ok);
+}
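
The new helper centralizes the interrupt-budget bookkeeping that EmitReturnSequence used to inline, so the tail-call path added later in this patch can reuse it (skipping the eax save/restore, since a tail call never returns here). The weight it decrements by reduces to:

#include <algorithm>

// Sketch of the weight computation, using the flag/constant names from the
// code above; the concrete values come from V8's flags at runtime.
int ReturnSequenceWeight(bool should_self_optimize, int pc_offset,
                         int interrupt_budget, int self_opt_count,
                         int code_size_multiplier, int max_back_edge_weight) {
  if (should_self_optimize) return interrupt_budget / self_opt_count;
  return std::min(max_back_edge_weight,
                  std::max(1, pc_offset / code_size_multiplier));
}
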
 
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
@@ -410,24 +413,7 @@
       __ push(eax);
       __ CallRuntime(Runtime::kTraceExit);
     }
-    // Pretend that the exit is a backwards jump to the entry.
-    int weight = 1;
-    if (info_->ShouldSelfOptimize()) {
-      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-    } else {
-      int distance = masm_->pc_offset();
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kCodeSizeMultiplier));
-    }
-    EmitProfilingCounterDecrement(weight);
-    Label ok;
-    __ j(positive, &ok, Label::kNear);
-    __ push(eax);
-    __ call(isolate()->builtins()->InterruptCheck(),
-            RelocInfo::CODE_TARGET);
-    __ pop(eax);
-    EmitProfilingCounterReset();
-    __ bind(&ok);
+    EmitProfilingCounterHandlingForReturnSequence(false);
 
     SetReturnPosition(literal());
     __ leave();
@@ -443,7 +429,7 @@
   DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
-  __ push(operand);
+  codegen()->PushOperand(operand);
 }
 
 
@@ -484,6 +470,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  codegen()->OperandStackDepthIncrement(1);
   if (lit->IsSmi()) {
     __ SafePush(Immediate(lit));
   } else {
@@ -497,7 +484,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -522,41 +509,14 @@
 }
 
 
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
-                                                   Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
-    int count,
-    Register reg) const {
-  DCHECK(count > 0);
-  __ Drop(count);
-  __ Move(result_register(), reg);
-}
-
-
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
   DCHECK(count > 0);
-  if (count > 1) __ Drop(count - 1);
+  if (count > 1) codegen()->DropOperands(count - 1);
   __ mov(Operand(esp, 0), reg);
 }
 
 
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
-                                                 Register reg) const {
-  DCHECK(count > 0);
-  // For simplicity we always test the accumulator register.
-  __ Drop(count);
-  __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
-  codegen()->DoTest(this);
-}
-
-
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
   DCHECK(materialize_true == materialize_false);
@@ -580,6 +540,7 @@
 void FullCodeGenerator::StackValueContext::Plug(
     Label* materialize_true,
     Label* materialize_false) const {
+  codegen()->OperandStackDepthIncrement(1);
   Label done;
   __ bind(materialize_true);
   __ push(Immediate(isolate()->factory()->true_value()));
@@ -606,6 +567,7 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  codegen()->OperandStackDepthIncrement(1);
   Handle<Object> value = flag
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
@@ -728,7 +690,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
   DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
-  if (generate_debug_code_) {
+  if (FLAG_debug_code) {
     // Check that we're not inside a with or catch context.
     __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
     __ cmp(ebx, isolate()->factory()->with_context_map());
@@ -838,11 +800,10 @@
 
     case VariableLocation::LOOKUP: {
       Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ push(Immediate(variable->name()));
+      PushOperand(variable->name());
       VisitForStackValue(declaration->fun());
-      __ push(
-          Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
-      __ CallRuntime(Runtime::kDeclareLookupSlot);
+      PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+      CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
       break;
     }
   }
@@ -915,8 +876,8 @@
     }
 
     SetExpressionPosition(clause);
-    Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
-                                             strength(language_mode())).code();
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -938,7 +899,7 @@
   // Discard the test value and jump to the default if present, otherwise to
   // the end of the statement.
   __ bind(&next_test);
-  __ Drop(1);  // Switch value is no longer needed.
+  DropOperands(1);  // Switch value is no longer needed.
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_label());
   } else {
@@ -969,22 +930,21 @@
   ForIn loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Get the object to enumerate over. If the object is null or undefined, skip
-  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  __ j(equal, &exit);
-  __ cmp(eax, isolate()->factory()->null_value());
-  __ j(equal, &exit);
+  OperandStackDepthIncrement(ForIn::kElementCount);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
-  // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop, otherwise convert
+  // it to a JS receiver.  See ECMA-262 version 5, section 12.6.4.
   Label convert, done_convert;
   __ JumpIfSmi(eax, &convert, Label::kNear);
   __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
   __ j(above_equal, &done_convert, Label::kNear);
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  __ j(equal, &exit);
+  __ cmp(eax, isolate()->factory()->null_value());
+  __ j(equal, &exit);
   __ bind(&convert);
   ToObjectStub stub(isolate());
   __ CallStub(&stub);
@@ -992,15 +952,13 @@
   PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
   __ push(eax);
 
-  // Check for proxies.
-  Label call_runtime, use_cache, fixed_array;
-  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
-  __ j(equal, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
+  // Note: Proxies never have an enum cache, so will always take the
+  // slow path.
+  Label call_runtime, use_cache, fixed_array;
   __ CheckEnumCache(&call_runtime);
 
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1009,7 +967,7 @@
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
   __ push(eax);
-  __ CallRuntime(Runtime::kGetPropertyNamesFast);
+  __ CallRuntime(Runtime::kForInEnumerate);
   PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
@@ -1043,14 +1001,15 @@
   __ bind(&fixed_array);
 
   // No need for a write barrier, we are storing a Smi in the feedback vector.
+  int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadTypeFeedbackVector(ebx);
-  int vector_index = SmiFromSlot(slot)->value();
   __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
          Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
  __ push(Immediate(Smi::FromInt(1)));  // Smi(1) indicates slow check
   __ push(eax);  // Array
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
+  PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
   // Generate code for doing the condition check.
@@ -1076,6 +1035,16 @@
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and only now notice in fullcodegen that we can
+  // no longer use the enum cache, i.e. we have left fast mode. So better
+  // record this information here, in case we later OSR back into this loop
+  // or reoptimize the whole function w/o rerunning the loop with the slow
+  // mode object in fullcodegen (which would result in a deopt loop).
+  __ EmitLoadTypeFeedbackVector(edx);
+  __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
+         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
@@ -1113,6 +1082,7 @@
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
   __ add(esp, Immediate(5 * kPointerSize));
+  OperandStackDepthDecrement(ForIn::kElementCount);
 
   // Exit and decrement the loop depth.
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
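
For reference, the five slots (ForIn::kElementCount) released by the `add esp, 5 * kPointerSize` above are the ones pushed while the loop was set up:

// Operand-stack layout during the for-in body, reconstructed from the
// pushes earlier in this function (esp[0] is the top of the stack):
//   esp[4 * kPointerSize]  receiver being enumerated (after ToObject)
//   esp[3 * kPointerSize]  cache type: its map, or Smi(1) on the slow path
//   esp[2 * kPointerSize]  enumeration array (FixedArray of keys)
//   esp[1 * kPointerSize]  array length (smi)
//   esp[0 * kPointerSize]  current index (smi), starting at Smi(0)
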
@@ -1355,12 +1325,11 @@
       // by eval-introduced variables.
       EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
       __ bind(&slow);
-      __ push(esi);  // Context.
       __ push(Immediate(var->name()));
       Runtime::FunctionId function_id =
           typeof_mode == NOT_INSIDE_TYPEOF
               ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotNoReferenceError;
+              : Runtime::kLoadLookupSlotInsideTypeof;
       __ CallRuntime(function_id);
       __ bind(&done);
       context()->Plug(eax);
@@ -1385,7 +1354,7 @@
 void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
   Expression* expression = (property == NULL) ? NULL : property->value();
   if (expression == NULL) {
-    __ push(Immediate(isolate()->factory()->null_value()));
+    PushOperand(isolate()->factory()->null_value());
   } else {
     VisitForStackValue(expression);
     if (NeedsHomeObject(expression)) {
@@ -1435,7 +1404,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(eax);  // Save result on the stack
+      PushOperand(eax);  // Save result on the stack
       result_saved = true;
     }
     switch (property->kind()) {
@@ -1464,24 +1433,24 @@
           }
           break;
         }
-        __ push(Operand(esp, 0));  // Duplicate receiver.
+        PushOperand(Operand(esp, 0));  // Duplicate receiver.
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
           if (NeedsHomeObject(value)) {
             EmitSetHomeObject(value, 2, property->GetSlot());
           }
-          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Language mode
-          __ CallRuntime(Runtime::kSetProperty);
+          PushOperand(Smi::FromInt(SLOPPY));  // Language mode
+          CallRuntimeWithOperands(Runtime::kSetProperty);
         } else {
-          __ Drop(3);
+          DropOperands(3);
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
-        __ push(Operand(esp, 0));  // Duplicate receiver.
+        PushOperand(Operand(esp, 0));  // Duplicate receiver.
         VisitForStackValue(value);
         DCHECK(property->emit_store());
-        __ CallRuntime(Runtime::kInternalSetPrototype);
+        CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
         PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                                NO_REGISTERS);
         break;
@@ -1503,14 +1472,14 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end();
        ++it) {
-    __ push(Operand(esp, 0));  // Duplicate receiver.
+    PushOperand(Operand(esp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
 
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
 
-    __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+    PushOperand(Smi::FromInt(NONE));
+    CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1527,17 +1496,17 @@
 
     Expression* value = property->value();
     if (!result_saved) {
-      __ push(eax);  // Save result on the stack
+      PushOperand(eax);  // Save result on the stack
       result_saved = true;
     }
 
-    __ push(Operand(esp, 0));  // Duplicate receiver.
+    PushOperand(Operand(esp, 0));  // Duplicate receiver.
 
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(!property->is_computed_name());
       VisitForStackValue(value);
       DCHECK(property->emit_store());
-      __ CallRuntime(Runtime::kInternalSetPrototype);
+      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
       PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
                              NO_REGISTERS);
     } else {
@@ -1552,10 +1521,11 @@
         case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         case ObjectLiteral::Property::COMPUTED:
           if (property->emit_store()) {
-            __ push(Immediate(Smi::FromInt(NONE)));
-            __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+            PushOperand(Smi::FromInt(NONE));
+            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
           } else {
-            __ Drop(3);
+            DropOperands(3);
           }
           break;
 
@@ -1564,13 +1534,13 @@
           break;
 
         case ObjectLiteral::Property::GETTER:
-          __ push(Immediate(Smi::FromInt(NONE)));
-          __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
           break;
 
         case ObjectLiteral::Property::SETTER:
-          __ push(Immediate(Smi::FromInt(NONE)));
-          __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+          PushOperand(Smi::FromInt(NONE));
+          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
           break;
       }
     }
@@ -1628,14 +1598,14 @@
   int array_index = 0;
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
-    if (subexpr->IsSpread()) break;
+    DCHECK(!subexpr->IsSpread());
 
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ push(eax);  // array literal.
+      PushOperand(eax);  // array literal.
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1656,21 +1626,16 @@
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
   if (array_index < length && result_saved) {
-    __ Pop(eax);
+    PopOperand(eax);
     result_saved = false;
   }
   for (; array_index < length; array_index++) {
     Expression* subexpr = subexprs->at(array_index);
 
-    __ Push(eax);
-    if (subexpr->IsSpread()) {
-      VisitForStackValue(subexpr->AsSpread()->expression());
-      __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
-                       CALL_FUNCTION);
-    } else {
-      VisitForStackValue(subexpr);
-      __ CallRuntime(Runtime::kAppendElement);
-    }
+    PushOperand(eax);
+    DCHECK(!subexpr->IsSpread());
+    VisitForStackValue(subexpr);
+    CallRuntimeWithOperands(Runtime::kAppendElement);
 
     PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
   }
@@ -1702,10 +1667,10 @@
           property->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           property->obj()->AsSuperPropertyReference()->home_object());
-      __ push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
+        PushOperand(MemOperand(esp, kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case NAMED_PROPERTY:
@@ -1723,11 +1688,11 @@
       VisitForStackValue(
           property->obj()->AsSuperPropertyReference()->home_object());
       VisitForAccumulatorValue(property->key());
-      __ Push(result_register());
+      PushOperand(result_register());
       if (expr->is_compound()) {
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(result_register());
       }
       break;
     case KEYED_PROPERTY: {
@@ -1774,7 +1739,7 @@
     }
 
     Token::Value op = expr->binary_op();
-    __ push(eax);  // Left operand goes on the stack.
+    PushOperand(eax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
 
     if (ShouldInlineSmiCase(op)) {
@@ -1839,8 +1804,16 @@
 
       __ jmp(&suspend);
       __ bind(&continuation);
+      // When we arrive here, the stack top is the resume mode and
+      // result_register() holds the input value (the argument given to the
+      // respective resume operation).
       __ RecordGeneratorContinuation();
-      __ jmp(&resume);
+      __ pop(ebx);
+      __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+      __ j(not_equal, &resume);
+      __ push(result_register());
+      EmitCreateIteratorResult(true);
+      EmitUnwindAndReturn();
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
@@ -1859,7 +1832,7 @@
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
-      __ pop(result_register());
+      PopOperand(result_register());
       EmitReturnSequence();
 
       __ bind(&resume);
@@ -1868,126 +1841,15 @@
     }
 
     case Yield::kFinal: {
-      VisitForAccumulatorValue(expr->generator_object());
-      __ mov(FieldOperand(result_register(),
-                          JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
       // Pop value from top-of-stack slot, box result into result register.
+      OperandStackDepthDecrement(1);
       EmitCreateIteratorResult(true);
-      EmitUnwindBeforeReturn();
-      EmitReturnSequence();
+      EmitUnwindAndReturn();
       break;
     }
 
-    case Yield::kDelegating: {
-      VisitForStackValue(expr->generator_object());
-
-      // Initial stack layout is as follows:
-      // [sp + 1 * kPointerSize] iter
-      // [sp + 0 * kPointerSize] g
-
-      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
-      Register load_receiver = LoadDescriptor::ReceiverRegister();
-      Register load_name = LoadDescriptor::NameRegister();
-
-      // Initial send value is undefined.
-      __ mov(eax, isolate()->factory()->undefined_value());
-      __ jmp(&l_next);
-
-      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
-      __ bind(&l_catch);
-      __ mov(load_name, isolate()->factory()->throw_string());  // "throw"
-      __ push(load_name);                                       // "throw"
-      __ push(Operand(esp, 2 * kPointerSize));                  // iter
-      __ push(eax);                                             // exception
-      __ jmp(&l_call);
-
-      // try { received = %yield result }
-      // Shuffle the received result above a try handler and yield it without
-      // re-boxing.
-      __ bind(&l_try);
-      __ pop(eax);                                       // result
-      int handler_index = NewHandlerTableEntry();
-      EnterTryBlock(handler_index, &l_catch);
-      const int try_block_size = TryCatch::kElementCount * kPointerSize;
-      __ push(eax);                                      // result
-
-      __ jmp(&l_suspend);
-      __ bind(&l_continuation);
-      __ RecordGeneratorContinuation();
-      __ jmp(&l_resume);
-
-      __ bind(&l_suspend);
-      const int generator_object_depth = kPointerSize + try_block_size;
-      __ mov(eax, Operand(esp, generator_object_depth));
-      __ push(eax);                                      // g
-      __ push(Immediate(Smi::FromInt(handler_index)));   // handler-index
-      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-             Immediate(Smi::FromInt(l_continuation.pos())));
-      __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-      __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                          kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
-      __ mov(context_register(),
-             Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ pop(eax);                                       // result
-      EmitReturnSequence();
-      __ bind(&l_resume);                                // received in eax
-      ExitTryBlock(handler_index);
-
-      // receiver = iter; f = iter.next; arg = received;
-      __ bind(&l_next);
-
-      __ mov(load_name, isolate()->factory()->next_string());
-      __ push(load_name);                           // "next"
-      __ push(Operand(esp, 2 * kPointerSize));      // iter
-      __ push(eax);                                 // received
-
-      // result = receiver[f](arg);
-      __ bind(&l_call);
-      __ mov(load_receiver, Operand(esp, kPointerSize));
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
-      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
-      CallIC(ic, TypeFeedbackId::None());
-      __ mov(edi, eax);
-      __ mov(Operand(esp, 2 * kPointerSize), edi);
-      SetCallPosition(expr);
-      __ Set(eax, 1);
-      __ Call(
-          isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
-          RelocInfo::CODE_TARGET);
-
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ Drop(1);  // The function is still on the stack; drop it.
-
-      // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ push(eax);                                      // save result
-      __ Move(load_receiver, eax);                       // result
-      __ mov(load_name,
-             isolate()->factory()->done_string());       // "done"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);  // result.done in eax
-      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
-      CallIC(bool_ic);
-      __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ j(not_equal, &l_try);
-
-      // result.value
-      __ pop(load_receiver);                              // result
-      __ mov(load_name,
-             isolate()->factory()->value_string());       // "value"
-      __ mov(LoadDescriptor::SlotRegister(),
-             Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
-      CallLoadIC(NOT_INSIDE_TYPEOF);                      // result.value in eax
-      context()->DropAndPlug(2, eax);                     // drop iter and g
-      break;
-    }
+    case Yield::kDelegating:
+      UNREACHABLE();
   }
 }
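
With yield* delegation no longer expanded by full-codegen (it is handled before code generation, hence the UNREACHABLE), every suspension follows one handshake: the resuming side stores the sent value in the generator's input field and pushes the resume mode, and the continuation pops that mode to choose between boxing an iterator result and a normal resume. A sketch of the decision, with hypothetical names:

enum ResumeMode { kNextResume, kReturnResume, kThrowResume };

// What the continuation at the yield point does with the mode that
// EmitGeneratorResume pushed ("Consumed in continuation").
bool ShouldBoxResultAndUnwind(ResumeMode popped_mode) {
  // Mirrors: __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
  //          __ j(not_equal, &resume);
  return popped_mode == kReturnResume;  // true: EmitCreateIteratorResult(true)
}
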
 
@@ -2001,7 +1863,13 @@
   // ebx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
   VisitForAccumulatorValue(value);
-  __ pop(ebx);
+  PopOperand(ebx);
+
+  // Store input value into generator object.
+  __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
+  __ mov(ecx, result_register());
+  __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
+                      kDontSaveFPRegs);
 
   // Load suspended function and context.
   __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2051,6 +1919,7 @@
     __ add(edx, ecx);
     __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
            Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
     __ jmp(edx);
     __ bind(&slow_resume);
   }
@@ -2064,6 +1933,7 @@
   __ push(ecx);
   __ jmp(&push_operand_holes);
   __ bind(&call_resume);
+  __ Push(Smi::FromInt(resume_mode));  // Consumed in continuation.
   __ push(ebx);
   __ push(result_register());
   __ Push(Smi::FromInt(resume_mode));
@@ -2075,6 +1945,21 @@
   context()->Plug(result_register());
 }
 
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+  OperandStackDepthIncrement(1);
+  __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+  if (FLAG_debug_code) {
+    int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+                        operand_stack_depth_ * kPointerSize;
+    __ mov(eax, ebp);
+    __ sub(eax, esp);
+    __ cmp(eax, Immediate(expected_diff));
+    __ Assert(equal, kUnexpectedStackDepth);
+  }
+}
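
The debug check asserts that the tracked operand depth matches the real frame: the ebp-to-esp distance must equal the fixed frame portion plus one pointer per tracked operand. As plain arithmetic, with ia32 sizes assumed:

#include <cassert>
#include <cstdint>

// Sketch of the invariant; kFixedFrameSizeFromFp is assumed to cover the
// slots between the frame pointer and the operand area (context, function).
void CheckOperandStackDepth(uintptr_t ebp, uintptr_t esp, int operand_depth) {
  const int kPointerSize = 4;                          // ia32/x87
  const int kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumption
  const int expected = kFixedFrameSizeFromFp + operand_depth * kPointerSize;
  assert(static_cast<int>(ebp - esp) == expected);     // kUnexpectedStackDepth
}
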
 
 void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label allocate, done_allocate;
@@ -2110,37 +1995,7 @@
   __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
   __ mov(LoadDescriptor::SlotRegister(),
          Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object.
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  __ push(Immediate(key->value()));
-  __ push(Immediate(Smi::FromInt(language_mode())));
-  __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
-  SetExpressionPosition(prop);
-  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
-  __ mov(LoadDescriptor::SlotRegister(),
-         Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
-  CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  __ push(Immediate(Smi::FromInt(language_mode())));
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallLoadIC(NOT_INSIDE_TYPEOF);
 }
 
 
@@ -2151,7 +2006,7 @@
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
-  __ pop(edx);
+  PopOperand(edx);
   __ mov(ecx, eax);
   __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
@@ -2159,8 +2014,7 @@
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
@@ -2240,24 +2094,14 @@
 
 
 void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  // Constructor is in eax.
-  DCHECK(lit != NULL);
-  __ push(eax);
-
-  // No access check is needed here since the constructor is created by the
-  // class literal.
-  Register scratch = ebx;
-  __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
-  __ Push(scratch);
-
   for (int i = 0; i < lit->properties()->length(); i++) {
     ObjectLiteral::Property* property = lit->properties()->at(i);
     Expression* value = property->value();
 
     if (property->is_static()) {
-      __ push(Operand(esp, kPointerSize));  // constructor
+      PushOperand(Operand(esp, kPointerSize));  // constructor
     } else {
-      __ push(Operand(esp, 0));  // prototype
+      PushOperand(Operand(esp, 0));  // prototype
     }
     EmitPropertyKey(property, lit->GetIdForProperty(i));
 
@@ -2281,31 +2125,28 @@
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();
       case ObjectLiteral::Property::COMPUTED:
-        __ CallRuntime(Runtime::kDefineClassMethod);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
         break;
 
       case ObjectLiteral::Property::GETTER:
-        __ push(Immediate(Smi::FromInt(DONT_ENUM)));
-        __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
         break;
 
       case ObjectLiteral::Property::SETTER:
-        __ push(Immediate(Smi::FromInt(DONT_ENUM)));
-        __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+        PushOperand(Smi::FromInt(DONT_ENUM));
+        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
         break;
     }
   }
-
-  // Set both the prototype and constructor to have fast properties, and also
-  // freeze them in strong mode.
-  __ CallRuntime(Runtime::kFinalizeClassDefinition);
 }
 
 
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
-  __ pop(edx);
-  Handle<Code> code =
-      CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+  PopOperand(edx);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
   CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
@@ -2328,10 +2169,10 @@
       break;
     }
     case NAMED_PROPERTY: {
-      __ push(eax);  // Preserve value.
+      PushOperand(eax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
       __ Move(StoreDescriptor::ReceiverRegister(), eax);
-      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      PopOperand(StoreDescriptor::ValueRegister());  // Restore value.
       __ mov(StoreDescriptor::NameRegister(),
              prop->key()->AsLiteral()->value());
       EmitLoadStoreICSlot(slot);
@@ -2339,7 +2180,7 @@
       break;
     }
     case NAMED_SUPER_PROPERTY: {
-      __ push(eax);
+      PushOperand(eax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForAccumulatorValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2356,7 +2197,7 @@
       break;
     }
     case KEYED_SUPER_PROPERTY: {
-      __ push(eax);
+      PushOperand(eax);
       VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
       VisitForStackValue(
           prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2376,12 +2217,12 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ push(eax);  // Preserve value.
+      PushOperand(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ Move(StoreDescriptor::NameRegister(), eax);
-      __ pop(StoreDescriptor::ReceiverRegister());  // Receiver.
-      __ pop(StoreDescriptor::ValueRegister());     // Restore value.
+      PopOperand(StoreDescriptor::ReceiverRegister());  // Receiver.
+      PopOperand(StoreDescriptor::ValueRegister());     // Restore value.
       EmitLoadStoreICSlot(slot);
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2461,17 +2302,17 @@
              (var->mode() == CONST && op == Token::INIT)) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
-      __ push(eax);  // Value.
-      __ push(esi);  // Context.
-      __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(language_mode())));
-      __ CallRuntime(Runtime::kStoreLookupSlot);
+      __ Push(Immediate(var->name()));
+      __ Push(eax);
+      __ CallRuntime(is_strict(language_mode())
+                         ? Runtime::kStoreLookupSlot_Strict
+                         : Runtime::kStoreLookupSlot_Sloppy);
     } else {
       // Assignment to var or initializing assignment to let/const in harmony
       // mode.
       DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, ecx);
-      if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
         // Check for an uninitialized let binding.
         __ mov(edx, location);
         __ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2518,7 +2359,7 @@
   DCHECK(prop->key()->IsLiteral());
 
   __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::ReceiverRegister());
   EmitLoadStoreICSlot(expr->AssignmentSlot());
   CallStoreIC();
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2534,10 +2375,11 @@
   Literal* key = prop->key()->AsLiteral();
   DCHECK(key != NULL);
 
-  __ push(Immediate(key->value()));
-  __ push(eax);
-  __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
-                                             : Runtime::kStoreToSuper_Sloppy));
+  PushOperand(key->value());
+  PushOperand(eax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreToSuper_Strict
+                              : Runtime::kStoreToSuper_Sloppy);
 }
 
 
@@ -2546,10 +2388,10 @@
   // eax : value
   // stack : receiver ('this'), home_object, key
 
-  __ push(eax);
-  __ CallRuntime((is_strict(language_mode())
-                      ? Runtime::kStoreKeyedToSuper_Strict
-                      : Runtime::kStoreKeyedToSuper_Sloppy));
+  PushOperand(eax);
+  CallRuntimeWithOperands(is_strict(language_mode())
+                              ? Runtime::kStoreKeyedToSuper_Strict
+                              : Runtime::kStoreKeyedToSuper_Sloppy);
 }
 
 
@@ -2559,8 +2401,8 @@
   // esp[0]            : key
   // esp[kPointerSize] : receiver
 
-  __ pop(StoreDescriptor::NameRegister());  // Key.
-  __ pop(StoreDescriptor::ReceiverRegister());
+  PopOperand(StoreDescriptor::NameRegister());  // Key.
+  PopOperand(StoreDescriptor::ReceiverRegister());
   DCHECK(StoreDescriptor::ValueRegister().is(eax));
   Handle<Code> ic =
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2592,7 +2434,7 @@
     if (!expr->IsSuperAccess()) {
       VisitForStackValue(expr->obj());
       VisitForAccumulatorValue(expr->key());
-      __ pop(LoadDescriptor::ReceiverRegister());                  // Object.
+      PopOperand(LoadDescriptor::ReceiverRegister());              // Object.
       __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
       EmitKeyedPropertyLoad(expr);
     } else {
@@ -2628,7 +2470,7 @@
     }
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    PushOperand(isolate()->factory()->undefined_value());
     convert_mode = ConvertReceiverMode::kNullOrUndefined;
   } else {
     // Load the function from the receiver.
@@ -2638,7 +2480,7 @@
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
+    PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
     convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
   }
@@ -2660,19 +2502,17 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
-  __ push(Immediate(key->value()));
-  __ push(Immediate(Smi::FromInt(language_mode())));
+  PushOperand(eax);
+  PushOperand(eax);
+  PushOperand(Operand(esp, kPointerSize * 2));
+  PushOperand(key->value());
   // Stack here:
   //  - home_object
   //  - this (receiver)
   //  - this (receiver) <-- LoadFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2700,7 +2540,7 @@
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
   // Push the target function under the receiver.
-  __ push(Operand(esp, 0));
+  PushOperand(Operand(esp, 0));
   __ mov(Operand(esp, kPointerSize), eax);
 
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2718,19 +2558,17 @@
   SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
   VisitForStackValue(super_ref->home_object());
   VisitForAccumulatorValue(super_ref->this_var());
-  __ push(eax);
-  __ push(eax);
-  __ push(Operand(esp, kPointerSize * 2));
+  PushOperand(eax);
+  PushOperand(eax);
+  PushOperand(Operand(esp, kPointerSize * 2));
   VisitForStackValue(prop->key());
-  __ push(Immediate(Smi::FromInt(language_mode())));
   // Stack here:
   //  - home_object
   //  - this (receiver)
   //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
   //  - home_object
   //  - key
-  //  - language_mode
-  __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
 
   // Replace home_object with target function.
   __ mov(Operand(esp, kPointerSize), eax);
@@ -2752,12 +2590,23 @@
 
   PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
   SetCallPosition(expr);
-  Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+  if (expr->tail_call_mode() == TailCallMode::kAllow) {
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceTailCall);
+    }
+    // Update profiling counters before the tail call since we will
+    // not return to this function.
+    EmitProfilingCounterHandlingForReturnSequence(true);
+  }
+  Handle<Code> ic =
+      CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+          .code();
   __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   // Don't assign a type feedback id to the IC, since type feedback is provided
   // by the vector above.
   CallIC(ic);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
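
The tail-call path above must finalize the profiling counters before emitting the call, because a tail call never returns to this frame; the CallIC is then built with the expression's tail-call mode, and the tracked operand depth drops by arg_count + 1, since the callee consumes the receiver plus all arguments. A hedged sketch of that accounting (illustrative helper, not from the tree):

// Depth adjustment paired with a call site: receiver + arg_count arguments
// are consumed by the callee.
void OnCallEmitted(int* operand_depth, int arg_count) {
  *operand_depth -= arg_count + 1;
}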
 
@@ -2803,11 +2652,10 @@
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in eax) and
     // the object holding it (returned in edx).
-    __ push(context_register());
-    __ push(Immediate(callee->name()));
-    __ CallRuntime(Runtime::kLoadLookupSlot);
-    __ push(eax);  // Function.
-    __ push(edx);  // Receiver.
+    __ Push(callee->name());
+    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+    PushOperand(eax);  // Function.
+    PushOperand(edx);  // Receiver.
     PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
 
     // If fast case code has been generated, emit code to push the function
@@ -2826,7 +2674,7 @@
   } else {
     VisitForStackValue(callee);
     // refEnv.WithBaseObject()
-    __ push(Immediate(isolate()->factory()->undefined_value()));
+    PushOperand(isolate()->factory()->undefined_value());
   }
 }
 
@@ -2858,7 +2706,10 @@
   SetCallPosition(expr);
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   __ Set(eax, arg_count);
-  __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                      expr->tail_call_mode()),
+          RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2899,6 +2750,7 @@
 
   CallConstructStub stub(isolate());
   __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2917,7 +2769,7 @@
   __ AssertFunction(result_register());
   __ mov(result_register(),
          FieldOperand(result_register(), HeapObject::kMapOffset));
-  __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
 
   // Push the arguments ("left-to-right") on the stack.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2939,6 +2791,7 @@
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
   __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 
   RecordJSReturnSite(expr);
 
@@ -2991,77 +2844,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(above_equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
-  __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
-  // Check if the exponent half is 0x80000000. Comparing against 1 and
-  // checking for overflow is the shortest possible encoding.
-  __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
-  __ j(no_overflow, if_false);
-  __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3150,68 +2932,6 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  VisitForStackValue(args->at(0));
-  VisitForAccumulatorValue(args->at(1));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ pop(ebx);
-  __ cmp(eax, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  VisitForAccumulatorValue(args->at(0));
-  __ mov(edx, eax);
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  DCHECK(expr->arguments()->length() == 0);
-
-  Label exit;
-  // Get the number of formal parameters.
-  __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  __ AssertSmi(eax);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3280,28 +3000,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = nullptr;
-  Label* if_false = nullptr;
-  Label* fall_through = nullptr;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(3, args->length());
@@ -3314,8 +3012,8 @@
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
 
-  __ pop(value);
-  __ pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ test(value, Immediate(kSmiTagMask));
@@ -3349,8 +3047,8 @@
   VisitForStackValue(args->at(0));        // index
   VisitForStackValue(args->at(1));        // value
   VisitForAccumulatorValue(args->at(2));  // string
-  __ pop(value);
-  __ pop(index);
+  PopOperand(value);
+  PopOperand(index);
 
   if (FLAG_debug_code) {
     __ test(value, Immediate(kSmiTagMask));
@@ -3371,35 +3069,6 @@
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-
-  VisitForStackValue(args->at(0));  // Load the object.
-  VisitForAccumulatorValue(args->at(1));  // Load the value.
-  __ pop(ebx);  // eax = value. ebx = object.
-
-  Label done;
-  // If the object is a smi, return the value.
-  __ JumpIfSmi(ebx, &done, Label::kNear);
-
-  // If the object is not a value type, return the value.
-  __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
-  __ j(not_equal, &done, Label::kNear);
-
-  // Store the value.
-  __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK_EQ(1, args->length());
@@ -3417,27 +3086,6 @@
 }
 
 
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK_EQ(1, args->length());
-
-  // Load the argument into eax and convert it.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Convert the object to a name.
-  Label convert, done_convert;
-  __ JumpIfSmi(eax, &convert, Label::kNear);
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
-  __ j(below_equal, &done_convert, Label::kNear);
-  __ bind(&convert);
-  __ Push(eax);
-  __ CallRuntime(Runtime::kToName);
-  __ bind(&done_convert);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -3468,7 +3116,7 @@
   Register index = eax;
   Register result = edx;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3515,7 +3163,7 @@
   Register scratch = edx;
   Register result = eax;
 
-  __ pop(object);
+  PopOperand(object);
 
   Label need_conversion;
   Label index_out_of_range;
@@ -3565,6 +3213,7 @@
   // Call the target.
   __ mov(eax, Immediate(argc));
   __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(argc + 1);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -3621,275 +3270,6 @@
 }
 
 
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  VisitForStackValue(args->at(1));
-  // Load this to eax (= array)
-  VisitForAccumulatorValue(args->at(0));
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = eax;
-  Register elements = no_reg;  // Will be eax.
-
-  Register index = edx;
-
-  Register string_length = ecx;
-
-  Register string = esi;
-
-  Register scratch = ebx;
-
-  Register array_length = edi;
-  Register result_pos = no_reg;  // Will be edi.
-
-  // Separator operand is already pushed.
-  Operand separator_operand = Operand(esp, 2 * kPointerSize);
-  Operand result_operand = Operand(esp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(esp, 0);
-  __ sub(esp, Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray
-  __ JumpIfSmi(array, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ CheckFastElements(scratch, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ SmiUntag(array_length);
-  __ j(not_zero, &non_trivial_array);
-  __ mov(result_operand, isolate()->factory()->empty_string());
-  __ jmp(&done);
-
-  // Save the array length.
-  __ bind(&non_trivial_array);
-  __ mov(array_length_operand, array_length);
-
-  // Save the FixedArray containing array's elements.
-  // End of array's live range.
-  elements = array;
-  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-
-  // Check that all array elements are sequential one-byte strings, and
-  // accumulate the sum of their lengths, as a smi-encoded value.
-  __ Move(index, Immediate(0));
-  __ Move(string_length, Immediate(0));
-  // Loop condition: while (index < length).
-  // Live loop registers: index, array_length, string,
-  //                      scratch, string_length, elements.
-  if (generate_debug_code_) {
-    __ cmp(index, array_length);
-    __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
-  }
-  __ bind(&loop);
-  __ mov(string, FieldOperand(elements,
-                              index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  __ add(string_length,
-         FieldOperand(string, SeqOneByteString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ add(index, Immediate(1));
-  __ cmp(index, array_length);
-  __ j(less, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, 1);
-  __ j(not_equal, &not_size_one_array);
-  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ mov(result_operand, scratch);
-  __ jmp(&done);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths, as a smi.
-  // elements: FixedArray of strings.
-
-  // Check that the separator is a flat one-byte string.
-  __ mov(string, separator_operand);
-  __ JumpIfSmi(string, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Add (separator length times array_length) - separator length
-  // to string_length.
-  __ mov(scratch, separator_operand);
-  __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, scratch);  // May be negative, temporarily.
-  __ imul(scratch, array_length_operand);
-  __ j(overflow, &bailout);
-  __ add(string_length, scratch);
-  __ j(overflow, &bailout);
-
-  __ shr(string_length, 1);
-
-  // Bailout for large object allocations.
-  __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
-  __ j(greater, &bailout);
-
-  // Live registers and stack values:
-  //   string_length
-  //   elements
-  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
-                           &bailout);
-  __ mov(result_operand, result_pos);
-  __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
-  __ mov(string, separator_operand);
-  __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-
-  // Empty separator case
-  __ mov(index, Immediate(0));
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-  __ bind(&loop_1_condition);
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_1);  // End while (index < length).
-  __ jmp(&done);
-
-
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its one-byte character value.
-  __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ mov_b(separator_operand, scratch);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator character to the result.
-  __ mov_b(scratch, separator_operand);
-  __ mov_b(Operand(result_pos, 0), scratch);
-  __ inc(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  __ Move(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the first
-  // element is not preceded by a separator
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator to the result.
-  __ mov(string, separator_operand);
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqOneByteString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_3);  // End while (index < length).
-  __ jmp(&done);
-
-
-  __ bind(&bailout);
-  __ mov(result_operand, isolate()->factory()->undefined_value());
-  __ bind(&done);
-  __ mov(eax, result_operand);
-  // Drop temp values from the stack, and restore context register.
-  __ add(esp, Immediate(3 * kPointerSize));
-
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
   DCHECK(expr->arguments()->length() == 0);
   ExternalReference debug_is_active =
@@ -3922,7 +3302,7 @@
   __ jmp(&done, Label::kNear);
 
   __ bind(&runtime);
-  __ CallRuntime(Runtime::kCreateIterResultObject);
+  CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3931,7 +3311,7 @@
 
 void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   // Push undefined as receiver.
-  __ push(Immediate(isolate()->factory()->undefined_value()));
+  PushOperand(isolate()->factory()->undefined_value());
 
   __ LoadGlobalFunction(expr->context_index(), eax);
 }
@@ -3946,6 +3326,7 @@
   __ Set(eax, arg_count);
   __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
           RelocInfo::CODE_TARGET);
+  OperandStackDepthDecrement(arg_count + 1);
 }
 
 
@@ -3958,7 +3339,7 @@
     EmitLoadJSRuntimeFunction(expr);
 
     // Push the target function under the receiver.
-    __ push(Operand(esp, 0));
+    PushOperand(Operand(esp, 0));
     __ mov(Operand(esp, kPointerSize), eax);
 
     // Push the arguments ("left-to-right").
@@ -3993,6 +3374,7 @@
         // Call the C runtime function.
         PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
         __ CallRuntime(expr->function(), arg_count);
+        OperandStackDepthDecrement(arg_count);
         context()->Plug(eax);
       }
     }
@@ -4010,9 +3392,9 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ CallRuntime(is_strict(language_mode())
-                           ? Runtime::kDeleteProperty_Strict
-                           : Runtime::kDeleteProperty_Sloppy);
+        CallRuntimeWithOperands(is_strict(language_mode())
+                                    ? Runtime::kDeleteProperty_Strict
+                                    : Runtime::kDeleteProperty_Sloppy);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
@@ -4034,8 +3416,7 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          __ push(context_register());
-          __ push(Immediate(var->name()));
+          __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteLookupSlot);
           context()->Plug(eax);
         }
@@ -4080,6 +3461,7 @@
                         &materialize_false,
                         &materialize_true,
                         &materialize_true);
+        if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
         __ bind(&materialize_true);
         PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
         if (context()->IsAccumulatorValue()) {
@@ -4135,7 +3517,7 @@
   } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
-      __ push(Immediate(Smi::FromInt(0)));
+      PushOperand(Smi::FromInt(0));
     }
     switch (assign_type) {
       case NAMED_PROPERTY: {
@@ -4150,9 +3532,9 @@
         VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
         VisitForAccumulatorValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
-        __ push(result_register());
-        __ push(MemOperand(esp, kPointerSize));
-        __ push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(esp, kPointerSize));
+        PushOperand(result_register());
         EmitNamedSuperPropertyLoad(prop);
         break;
       }
@@ -4162,10 +3544,10 @@
         VisitForStackValue(
             prop->obj()->AsSuperPropertyReference()->home_object());
         VisitForAccumulatorValue(prop->key());
-        __ push(result_register());
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(MemOperand(esp, 2 * kPointerSize));
-        __ push(result_register());
+        PushOperand(result_register());
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(MemOperand(esp, 2 * kPointerSize));
+        PushOperand(result_register());
         EmitKeyedSuperPropertyLoad(prop);
         break;
       }
@@ -4255,7 +3637,7 @@
       // of the stack.
       switch (assign_type) {
         case VARIABLE:
-          __ push(eax);
+          PushOperand(eax);
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -4279,8 +3661,8 @@
   __ bind(&stub_call);
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
-                                              strength(language_mode())).code();
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
   CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4315,7 +3697,7 @@
     case NAMED_PROPERTY: {
       __ mov(StoreDescriptor::NameRegister(),
              prop->key()->AsLiteral()->value());
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       EmitLoadStoreICSlot(expr->CountSlot());
       CallStoreIC();
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4351,8 +3733,8 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ pop(StoreDescriptor::NameRegister());
-      __ pop(StoreDescriptor::ReceiverRegister());
+      PopOperand(StoreDescriptor::NameRegister());
+      PopOperand(StoreDescriptor::ReceiverRegister());
       Handle<Code> ic =
           CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
       EmitLoadStoreICSlot(expr->CountSlot());
@@ -4407,8 +3789,8 @@
     __ cmp(eax, isolate()->factory()->false_value());
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
-    __ cmp(eax, isolate()->factory()->undefined_value());
-    __ j(equal, if_true);
+    __ cmp(eax, isolate()->factory()->null_value());
+    __ j(equal, if_false);
     __ JumpIfSmi(eax, if_false);
     // Check for undetectable objects => true.
     __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -4473,7 +3855,7 @@
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
-      __ CallRuntime(Runtime::kHasProperty);
+      CallRuntimeWithOperands(Runtime::kHasProperty);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -4481,7 +3863,7 @@
 
     case Token::INSTANCEOF: {
       VisitForAccumulatorValue(expr->right());
-      __ Pop(edx);
+      PopOperand(edx);
       InstanceOfStub stub(isolate());
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4493,7 +3875,7 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = CompareIC::ComputeCondition(op);
-      __ pop(edx);
+      PopOperand(edx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4507,8 +3889,7 @@
         __ bind(&slow_case);
       }
 
-      Handle<Code> ic = CodeFactory::CompareIC(
-                            isolate(), op, strength(language_mode())).code();
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4588,15 +3969,15 @@
     // as their closure, not the anonymous closure containing the global
     // code.
     __ mov(eax, NativeContextOperand());
-    __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(eax, Context::CLOSURE_INDEX));
   } else if (closure_scope->is_eval_scope()) {
     // Contexts nested inside eval code have the same closure as the context
     // calling eval, not the anonymous closure containing the eval code.
     // Fetch it from the context.
-    __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+    PushOperand(ContextOperand(esi, Context::CLOSURE_INDEX));
   } else {
     DCHECK(closure_scope->is_function_scope());
-    __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    PushOperand(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
 
@@ -4605,23 +3986,11 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  // Cook return address on top of stack (smi encoded Code* delta)
-  DCHECK(!result_register().is(edx));
-  __ pop(edx);
-  __ sub(edx, Immediate(masm_->CodeObject()));
-  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ SmiTag(edx);
-  __ push(edx);
-
-  // Store result register while executing finally block.
-  __ push(result_register());
-
   // Store pending message while executing finally block.
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(edx, Operand::StaticVariable(pending_message_obj));
-  __ push(edx);
+  PushOperand(edx);
 
   ClearPendingMessage();
 }
@@ -4630,19 +3999,10 @@
 void FullCodeGenerator::ExitFinallyBlock() {
   DCHECK(!result_register().is(edx));
   // Restore pending message from stack.
-  __ pop(edx);
+  PopOperand(edx);
   ExternalReference pending_message_obj =
       ExternalReference::address_of_pending_message_obj(isolate());
   __ mov(Operand::StaticVariable(pending_message_obj), edx);
-
-  // Restore result register from stack.
-  __ pop(result_register());
-
-  // Uncook return address.
-  __ pop(edx);
-  __ SmiUntag(edx);
-  __ add(edx, Immediate(masm_->CodeObject()));
-  __ jmp(edx);
 }
 
 
@@ -4661,6 +4021,32 @@
          Immediate(SmiFromSlot(slot)));
 }
 
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+  DCHECK(!result_register().is(edx));
+  __ Pop(result_register());  // Restore the accumulator.
+  __ Pop(edx);                // Get the token.
+  for (DeferredCommand cmd : commands_) {
+    Label skip;
+    __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
+    __ j(not_equal, &skip);
+    switch (cmd.command) {
+      case kReturn:
+        codegen_->EmitUnwindAndReturn();
+        break;
+      case kThrow:
+        __ Push(result_register());
+        __ CallRuntime(Runtime::kReThrow);
+        break;
+      case kContinue:
+        codegen_->EmitContinue(cmd.target);
+        break;
+      case kBreak:
+        codegen_->EmitBreak(cmd.target);
+        break;
+    }
+    __ bind(&skip);
+  }
+}
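
EmitCommands replaces the old scheme of cooking the return address (deleted above in EnterFinallyBlock/ExitFinallyBlock): each way of leaving a try..finally is recorded as a command plus a Smi token, the finally body runs, and the token popped afterwards selects which command to replay. A hedged C++ model of the dispatch, with illustrative types:

#include <vector>

enum Command { kReturn, kThrow, kContinue, kBreak };
struct Deferred { Command command; int token; };

// Mirrors the emitted cmp / j(not_equal) / switch sequence: only the entry
// whose token matches the one popped from the stack is executed.
void Replay(const std::vector<Deferred>& commands, int token) {
  for (const Deferred& cmd : commands) {
    if (cmd.token != token) continue;
    switch (cmd.command) {
      case kReturn:   /* unwind and return */ break;
      case kThrow:    /* re-throw the pending exception */ break;
      case kContinue: /* jump to the loop's continue target */ break;
      case kBreak:    /* jump to the break target */ break;
    }
  }
}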
 
 #undef __
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 4a72925..edd52b0 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -817,8 +817,6 @@
   while (callbacks->length() != 0) {
     auto callback = callbacks->RemoveLast();
     DCHECK(callback.node() == nullptr);
-    // No second pass callback required.
-    if (callback.callback() == nullptr) continue;
     // Fire second pass callback
     callback.Invoke(isolate);
   }
@@ -924,6 +922,7 @@
 int GlobalHandles::DispatchPendingPhantomCallbacks(
     bool synchronous_second_pass) {
   int freed_nodes = 0;
+  List<PendingPhantomCallback> second_pass_callbacks;
   {
     // The initial pass callbacks must simply clear the nodes.
     for (auto i = pending_phantom_callbacks_.begin();
@@ -932,24 +931,25 @@
       // Skip callbacks that have already been processed once.
       if (callback->node() == nullptr) continue;
       callback->Invoke(isolate());
+      if (callback->callback()) second_pass_callbacks.Add(*callback);
       freed_nodes++;
     }
   }
-  if (pending_phantom_callbacks_.length() > 0) {
+  pending_phantom_callbacks_.Clear();
+  if (second_pass_callbacks.length() > 0) {
     if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
       isolate()->heap()->CallGCPrologueCallbacks(
           GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
-      InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
+      InvokeSecondPassPhantomCallbacks(&second_pass_callbacks, isolate());
       isolate()->heap()->CallGCEpilogueCallbacks(
           GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
     } else {
       auto task = new PendingPhantomCallbacksSecondPassTask(
-          &pending_phantom_callbacks_, isolate());
+          &second_pass_callbacks, isolate());
       V8::GetCurrentPlatform()->CallOnForegroundThread(
           reinterpret_cast<v8::Isolate*>(isolate()), task);
     }
   }
-  pending_phantom_callbacks_.Clear();
   return freed_nodes;
 }
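
The reworked dispatch snapshots the callbacks that still need a second pass into a local list before clearing pending_phantom_callbacks_, so a GC triggered during processing cannot observe stale entries; the second pass then runs synchronously (size-optimized, predictable, or explicitly synchronous configurations) or is posted as a foreground task. A hedged model of the two-phase protocol with stand-in types:

#include <vector>

struct Callback {
  void (*first_pass)(void* data);   // must only reset the weak handle
  void (*second_pass)(void* data);  // may free external resources; optional
  void* data;
};

int Dispatch(std::vector<Callback>* pending, bool synchronous) {
  std::vector<Callback> second_pass;
  int freed = 0;
  for (Callback& cb : *pending) {
    cb.first_pass(cb.data);
    if (cb.second_pass != nullptr) second_pass.push_back(cb);
    ++freed;
  }
  pending->clear();  // cleared before any second-pass work runs
  if (synchronous) {
    for (Callback& cb : second_pass) cb.second_pass(cb.data);
  }
  // else: hand second_pass to a foreground task, as the diff does
  return freed;
}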
 
@@ -984,7 +984,7 @@
   int freed_nodes = 0;
   bool synchronous_second_pass =
       (gc_callback_flags &
-       (kGCCallbackFlagForced |
+       (kGCCallbackFlagForced | kGCCallbackFlagCollectAllAvailableGarbage |
         kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
   freed_nodes += DispatchPendingPhantomCallbacks(synchronous_second_pass);
   if (initial_post_gc_processing_count != post_gc_processing_count_) {
diff --git a/src/globals.h b/src/globals.h
index 67bdb63..be401a6 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -529,7 +529,7 @@
 };
 
 // Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, NATIVES_CODE };
 
 // JavaScript defines two kinds of 'nil'.
 enum NilValue { kNullValue, kUndefinedValue };
@@ -754,6 +754,45 @@
   return os;
 }
 
+// Defines whether tail call optimization is allowed.
+enum class TailCallMode : unsigned { kAllow, kDisallow };
+
+inline size_t hash_value(TailCallMode mode) { return bit_cast<unsigned>(mode); }
+
+inline std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
+  switch (mode) {
+    case TailCallMode::kAllow:
+      return os << "ALLOW_TAIL_CALLS";
+    case TailCallMode::kDisallow:
+      return os << "DISALLOW_TAIL_CALLS";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+// Defines specifics about arguments object or rest parameter creation.
+enum class CreateArgumentsType : uint8_t {
+  kMappedArguments,
+  kUnmappedArguments,
+  kRestParameter
+};
+
+inline size_t hash_value(CreateArgumentsType type) {
+  return bit_cast<uint8_t>(type);
+}
+
+inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
+  switch (type) {
+    case CreateArgumentsType::kMappedArguments:
+      return os << "MAPPED_ARGUMENTS";
+    case CreateArgumentsType::kUnmappedArguments:
+      return os << "UNMAPPED_ARGUMENTS";
+    case CreateArgumentsType::kRestParameter:
+      return os << "REST_PARAMETER";
+  }
+  UNREACHABLE();
+  return os;
+}
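
Both new enums follow the same pattern: a sized enum class, a hash_value() that reinterprets the underlying value, and a streaming operator whose exhaustive switch makes the compiler flag any enumerator added later. A hedged, self-contained instance of the pattern (Mode is illustrative, not from the tree):

#include <cstddef>
#include <cstdint>
#include <ostream>

enum class Mode : uint8_t { kOn, kOff };

inline std::size_t hash_value(Mode mode) { return static_cast<uint8_t>(mode); }

inline std::ostream& operator<<(std::ostream& os, Mode mode) {
  switch (mode) {
    case Mode::kOn:  return os << "ON";
    case Mode::kOff: return os << "OFF";
  }
  return os;  // unreachable for valid enumerators
}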
 
 // Used to specify if a macro instruction must perform a smi check on tagged
 // values.
@@ -934,43 +973,37 @@
 
 enum Signedness { kSigned, kUnsigned };
 
-
 enum FunctionKind {
   kNormalFunction = 0,
   kArrowFunction = 1 << 0,
   kGeneratorFunction = 1 << 1,
   kConciseMethod = 1 << 2,
   kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
-  kAccessorFunction = 1 << 3,
-  kDefaultConstructor = 1 << 4,
-  kSubclassConstructor = 1 << 5,
-  kBaseConstructor = 1 << 6,
-  kInObjectLiteral = 1 << 7,
+  kDefaultConstructor = 1 << 3,
+  kSubclassConstructor = 1 << 4,
+  kBaseConstructor = 1 << 5,
+  kGetterFunction = 1 << 6,
+  kSetterFunction = 1 << 7,
+  kAccessorFunction = kGetterFunction | kSetterFunction,
   kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
   kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
   kClassConstructor =
       kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
-  kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
-  kConciseGeneratorMethodInObjectLiteral =
-      kConciseGeneratorMethod | kInObjectLiteral,
-  kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral,
 };
 
-
 inline bool IsValidFunctionKind(FunctionKind kind) {
   return kind == FunctionKind::kNormalFunction ||
          kind == FunctionKind::kArrowFunction ||
          kind == FunctionKind::kGeneratorFunction ||
          kind == FunctionKind::kConciseMethod ||
          kind == FunctionKind::kConciseGeneratorMethod ||
+         kind == FunctionKind::kGetterFunction ||
+         kind == FunctionKind::kSetterFunction ||
          kind == FunctionKind::kAccessorFunction ||
          kind == FunctionKind::kDefaultBaseConstructor ||
          kind == FunctionKind::kDefaultSubclassConstructor ||
          kind == FunctionKind::kBaseConstructor ||
-         kind == FunctionKind::kSubclassConstructor ||
-         kind == FunctionKind::kConciseMethodInObjectLiteral ||
-         kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
-         kind == FunctionKind::kAccessorFunctionInObjectLiteral;
+         kind == FunctionKind::kSubclassConstructor;
 }
 
 
@@ -991,6 +1024,15 @@
   return kind & FunctionKind::kConciseMethod;
 }
 
+inline bool IsGetterFunction(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kGetterFunction;
+}
+
+inline bool IsSetterFunction(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kSetterFunction;
+}
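
These predicates work because each FunctionKind is a distinct bit and composites are ORs of those bits; in particular kAccessorFunction is now kGetterFunction | kSetterFunction, so a bitwise test against it is true for either accessor half. A hedged reduction of the scheme:

enum Kind : unsigned {
  kGetter   = 1 << 6,
  kSetter   = 1 << 7,
  kAccessor = kGetter | kSetter,  // composite: matches either bit
};

inline bool IsAccessor(unsigned kind) { return (kind & kAccessor) != 0; }
// IsAccessor(kGetter) == true, IsAccessor(kSetter) == true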
 
 inline bool IsAccessorFunction(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
@@ -1024,24 +1066,21 @@
 
 inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
   if (IsAccessorFunction(kind)) return false;
-  if (IsConciseMethod(kind) && !IsGeneratorFunction(kind)) return false;
+  if (IsConciseMethod(kind)) return false;
   if (IsArrowFunction(kind)) return false;
+  if (IsGeneratorFunction(kind)) return false;
   if (is_strong(mode)) return IsClassConstructor(kind);
   return true;
 }
 
 
-inline bool IsInObjectLiteral(FunctionKind kind) {
-  DCHECK(IsValidFunctionKind(kind));
-  return kind & FunctionKind::kInObjectLiteral;
+inline uint32_t ObjectHash(Address address) {
+  // All objects are at least pointer aligned, so we can remove the trailing
+  // zeros.
+  return static_cast<uint32_t>(bit_cast<uintptr_t>(address) >>
+                               kPointerSizeLog2);
 }
 
-
-inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
-  kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
-  DCHECK(IsValidFunctionKind(kind));
-  return kind;
-}
 }  // namespace internal
 }  // namespace v8
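
ObjectHash above exploits the fact that every heap object address has its low kPointerSizeLog2 bits clear; shifting them away means nearby objects hash to consecutive values instead of clustering in every 2^k-th bucket. A hedged standalone version:

#include <cstdint>

inline uint32_t AlignedPointerHash(std::uintptr_t address,
                                   unsigned alignment_log2) {
  // Drop the always-zero alignment bits so low-order entropy is not wasted.
  return static_cast<uint32_t>(address >> alignment_log2);
}
// e.g. on a 64-bit build (alignment_log2 == 3), 0x1000 and 0x1008 hash to
// adjacent values rather than sharing three trailing zero bits.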
 
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
new file mode 100644
index 0000000..4a772eb
--- /dev/null
+++ b/src/heap-symbols.h
@@ -0,0 +1,203 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SYMBOLS_H_
+#define V8_HEAP_SYMBOLS_H_
+
+#define INTERNALIZED_STRING_LIST(V)                                \
+  V(anonymous_string, "anonymous")                                 \
+  V(apply_string, "apply")                                         \
+  V(assign_string, "assign")                                       \
+  V(arguments_string, "arguments")                                 \
+  V(Arguments_string, "Arguments")                                 \
+  V(Array_string, "Array")                                         \
+  V(bind_string, "bind")                                           \
+  V(bool16x8_string, "bool16x8")                                   \
+  V(Bool16x8_string, "Bool16x8")                                   \
+  V(bool32x4_string, "bool32x4")                                   \
+  V(Bool32x4_string, "Bool32x4")                                   \
+  V(bool8x16_string, "bool8x16")                                   \
+  V(Bool8x16_string, "Bool8x16")                                   \
+  V(boolean_string, "boolean")                                     \
+  V(Boolean_string, "Boolean")                                     \
+  V(bound__string, "bound ")                                       \
+  V(byte_length_string, "byteLength")                              \
+  V(byte_offset_string, "byteOffset")                              \
+  V(call_string, "call")                                           \
+  V(callee_string, "callee")                                       \
+  V(caller_string, "caller")                                       \
+  V(cell_value_string, "%cell_value")                              \
+  V(char_at_string, "CharAt")                                      \
+  V(closure_string, "(closure)")                                   \
+  V(compare_ic_string, "==")                                       \
+  V(configurable_string, "configurable")                           \
+  V(constructor_string, "constructor")                             \
+  V(construct_string, "construct")                                 \
+  V(create_string, "create")                                       \
+  V(Date_string, "Date")                                           \
+  V(default_string, "default")                                     \
+  V(defineProperty_string, "defineProperty")                       \
+  V(deleteProperty_string, "deleteProperty")                       \
+  V(display_name_string, "displayName")                            \
+  V(done_string, "done")                                           \
+  V(dot_result_string, ".result")                                  \
+  V(dot_string, ".")                                               \
+  V(entries_string, "entries")                                     \
+  V(enumerable_string, "enumerable")                               \
+  V(enumerate_string, "enumerate")                                 \
+  V(Error_string, "Error")                                         \
+  V(eval_string, "eval")                                           \
+  V(false_string, "false")                                         \
+  V(float32x4_string, "float32x4")                                 \
+  V(Float32x4_string, "Float32x4")                                 \
+  V(for_api_string, "for_api")                                     \
+  V(for_string, "for")                                             \
+  V(function_string, "function")                                   \
+  V(Function_string, "Function")                                   \
+  V(Generator_string, "Generator")                                 \
+  V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor")   \
+  V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+  V(getPrototypeOf_string, "getPrototypeOf")                       \
+  V(get_string, "get")                                             \
+  V(global_string, "global")                                       \
+  V(has_string, "has")                                             \
+  V(illegal_access_string, "illegal access")                       \
+  V(illegal_argument_string, "illegal argument")                   \
+  V(index_string, "index")                                         \
+  V(infinity_string, "Infinity")                                   \
+  V(input_string, "input")                                         \
+  V(int16x8_string, "int16x8")                                     \
+  V(Int16x8_string, "Int16x8")                                     \
+  V(int32x4_string, "int32x4")                                     \
+  V(Int32x4_string, "Int32x4")                                     \
+  V(int8x16_string, "int8x16")                                     \
+  V(Int8x16_string, "Int8x16")                                     \
+  V(isExtensible_string, "isExtensible")                           \
+  V(isView_string, "isView")                                       \
+  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")           \
+  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")         \
+  V(last_index_string, "lastIndex")                                \
+  V(length_string, "length")                                       \
+  V(Map_string, "Map")                                             \
+  V(minus_infinity_string, "-Infinity")                            \
+  V(minus_zero_string, "-0")                                       \
+  V(name_string, "name")                                           \
+  V(nan_string, "NaN")                                             \
+  V(next_string, "next")                                           \
+  V(null_string, "null")                                           \
+  V(null_to_string, "[object Null]")                               \
+  V(number_string, "number")                                       \
+  V(Number_string, "Number")                                       \
+  V(object_string, "object")                                       \
+  V(Object_string, "Object")                                       \
+  V(ownKeys_string, "ownKeys")                                     \
+  V(preventExtensions_string, "preventExtensions")                 \
+  V(private_api_string, "private_api")                             \
+  V(Promise_string, "Promise")                                     \
+  V(proto_string, "__proto__")                                     \
+  V(prototype_string, "prototype")                                 \
+  V(Proxy_string, "Proxy")                                         \
+  V(query_colon_string, "(?:)")                                    \
+  V(RegExp_string, "RegExp")                                       \
+  V(setPrototypeOf_string, "setPrototypeOf")                       \
+  V(set_string, "set")                                             \
+  V(Set_string, "Set")                                             \
+  V(source_mapping_url_string, "source_mapping_url")               \
+  V(source_string, "source")                                       \
+  V(source_url_string, "source_url")                               \
+  V(stack_string, "stack")                                         \
+  V(strict_compare_ic_string, "===")                               \
+  V(string_string, "string")                                       \
+  V(String_string, "String")                                       \
+  V(symbol_string, "symbol")                                       \
+  V(Symbol_string, "Symbol")                                       \
+  V(this_string, "this")                                           \
+  V(throw_string, "throw")                                         \
+  V(toJSON_string, "toJSON")                                       \
+  V(toString_string, "toString")                                   \
+  V(true_string, "true")                                           \
+  V(uint16x8_string, "uint16x8")                                   \
+  V(Uint16x8_string, "Uint16x8")                                   \
+  V(uint32x4_string, "uint32x4")                                   \
+  V(Uint32x4_string, "Uint32x4")                                   \
+  V(uint8x16_string, "uint8x16")                                   \
+  V(Uint8x16_string, "Uint8x16")                                   \
+  V(undefined_string, "undefined")                                 \
+  V(undefined_to_string, "[object Undefined]")                     \
+  V(valueOf_string, "valueOf")                                     \
+  V(values_string, "values")                                       \
+  V(value_string, "value")                                         \
+  V(WeakMap_string, "WeakMap")                                     \
+  V(WeakSet_string, "WeakSet")                                     \
+  V(writable_string, "writable")
+
+#define PRIVATE_SYMBOL_LIST(V)              \
+  V(array_iteration_kind_symbol)            \
+  V(array_iterator_next_symbol)             \
+  V(array_iterator_object_symbol)           \
+  V(call_site_function_symbol)              \
+  V(call_site_position_symbol)              \
+  V(call_site_receiver_symbol)              \
+  V(call_site_strict_symbol)                \
+  V(class_end_position_symbol)              \
+  V(class_start_position_symbol)            \
+  V(detailed_stack_trace_symbol)            \
+  V(elements_transition_symbol)             \
+  V(error_end_pos_symbol)                   \
+  V(error_script_symbol)                    \
+  V(error_start_pos_symbol)                 \
+  V(formatted_stack_trace_symbol)           \
+  V(frozen_symbol)                          \
+  V(hash_code_symbol)                       \
+  V(hidden_properties_symbol)               \
+  V(home_object_symbol)                     \
+  V(internal_error_symbol)                  \
+  V(intl_impl_object_symbol)                \
+  V(intl_initialized_marker_symbol)         \
+  V(intl_pattern_symbol)                    \
+  V(intl_resolved_symbol)                   \
+  V(megamorphic_symbol)                     \
+  V(native_context_index_symbol)            \
+  V(nonexistent_symbol)                     \
+  V(nonextensible_symbol)                   \
+  V(normal_ic_symbol)                       \
+  V(not_mapped_symbol)                      \
+  V(observed_symbol)                        \
+  V(premonomorphic_symbol)                  \
+  V(promise_combined_deferred_symbol)       \
+  V(promise_debug_marker_symbol)            \
+  V(promise_has_handler_symbol)             \
+  V(promise_on_resolve_symbol)              \
+  V(promise_on_reject_symbol)               \
+  V(promise_raw_symbol)                     \
+  V(promise_status_symbol)                  \
+  V(promise_value_symbol)                   \
+  V(sealed_symbol)                          \
+  V(stack_trace_symbol)                     \
+  V(strict_function_transition_symbol)      \
+  V(string_iterator_iterated_string_symbol) \
+  V(string_iterator_next_index_symbol)      \
+  V(strong_function_transition_symbol)      \
+  V(uninitialized_symbol)
+
+#define PUBLIC_SYMBOL_LIST(V)                \
+  V(iterator_symbol, Symbol.iterator)        \
+  V(match_symbol, Symbol.match)              \
+  V(replace_symbol, Symbol.replace)          \
+  V(search_symbol, Symbol.search)            \
+  V(species_symbol, Symbol.species)          \
+  V(split_symbol, Symbol.split)              \
+  V(to_primitive_symbol, Symbol.toPrimitive) \
+  V(unscopables_symbol, Symbol.unscopables)
+
+// Well-Known Symbols are "Public" symbols, which have a bit set that causes
+// them to produce an undefined value when a load results in a failed access
+// check. Because this behaviour is not yet properly specified, it only
+// applies to a subset of spec-defined Well-Known Symbols.
+#define WELL_KNOWN_SYMBOL_LIST(V)                           \
+  V(has_instance_symbol, Symbol.hasInstance)                \
+  V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+  V(to_string_tag_symbol, Symbol.toStringTag)
+
+#endif  // V8_HEAP_SYMBOLS_H_
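
These V(...) lists are X-macros: each consumer defines V to expand one entry and then invokes the list, so declarations, initialization, and enumeration of the same names stay in sync from a single source. A hedged usage sketch, assuming V8's Handle/String types (DECLARE_STRING_ACCESSOR is illustrative, not from the tree):

// Expands to one accessor declaration per internalized string, e.g.
//   Handle<String> anonymous_string();
#define DECLARE_STRING_ACCESSOR(name, contents) Handle<String> name();
INTERNALIZED_STRING_LIST(DECLARE_STRING_ACCESSOR)
#undef DECLARE_STRING_ACCESSOR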
diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc
index bbe3c6b..6e389c1 100644
--- a/src/heap/array-buffer-tracker.cc
+++ b/src/heap/array-buffer-tracker.cc
@@ -77,6 +77,7 @@
 
 
 void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
   void* data = buffer->backing_store();
 
   // ArrayBuffer might be in the middle of being constructed.
@@ -123,6 +124,8 @@
 
 
 void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+
   if (buffer->is_external()) return;
   void* data = buffer->backing_store();
   if (!data) return;
diff --git a/src/heap/array-buffer-tracker.h b/src/heap/array-buffer-tracker.h
index 7ba22fb..6130003 100644
--- a/src/heap/array-buffer-tracker.h
+++ b/src/heap/array-buffer-tracker.h
@@ -7,6 +7,7 @@
 
 #include <map>
 
+#include "src/base/platform/mutex.h"
 #include "src/globals.h"
 
 namespace v8 {
@@ -47,6 +48,7 @@
   void Promote(JSArrayBuffer* buffer);
 
  private:
+  base::Mutex mutex_;
   Heap* heap_;
 
   // |live_array_buffers_| maps externally allocated memory used as backing
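
Note: the two hunks above make MarkLive and Promote safe to call concurrently by taking the tracker's new base::Mutex for the duration of each call. A minimal sketch of the same scoped-locking pattern, with std::mutex and std::lock_guard standing in for base::Mutex and base::LockGuard:

```cpp
#include <mutex>
#include <set>

class TrackerSketch {
 public:
  // Each mutating entry point holds the lock for its whole scope, mirroring
  // the base::LockGuard<base::Mutex> guards added to MarkLive and Promote.
  void MarkLive(void* data) {
    std::lock_guard<std::mutex> guard(mutex_);
    live_.insert(data);
  }
  void Promote(void* data) {
    std::lock_guard<std::mutex> guard(mutex_);
    live_.erase(data);
    promoted_.insert(data);
  }

 private:
  std::mutex mutex_;          // serializes all tracker state updates
  std::set<void*> live_;      // stands in for live_array_buffers_
  std::set<void*> promoted_;  // stands in for the promoted set
};
```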
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index a723b3b..57e6cc4 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -359,10 +359,6 @@
   return result;
 }
 
-
-bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
-
-
 bool Heap::InFromSpace(Object* object) {
   return new_space_.FromSpaceContains(object);
 }
@@ -372,14 +368,15 @@
   return new_space_.ToSpaceContains(object);
 }
 
+bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
 
-bool Heap::InOldSpace(Address address) { return old_space_->Contains(address); }
-
-
-bool Heap::InOldSpace(Object* object) {
-  return InOldSpace(reinterpret_cast<Address>(object));
+bool Heap::InNewSpaceSlow(Address address) {
+  return new_space_.ContainsSlow(address);
 }
 
+bool Heap::InOldSpaceSlow(Address address) {
+  return old_space_->ContainsSlow(address);
+}
 
 bool Heap::OldGenerationAllocationLimitReached() {
   if (!incremental_marking()->IsStopped()) return false;
@@ -394,18 +391,13 @@
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
-
-void Heap::RecordWrite(Address address, int offset) {
-  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
-}
-
-
-void Heap::RecordWrites(Address address, int start, int len) {
-  if (!InNewSpace(address)) {
-    for (int i = 0; i < len; i++) {
-      store_buffer_.Mark(address + start + i * kPointerSize);
-    }
+void Heap::RecordWrite(Object* object, int offset, Object* o) {
+  if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
+    return;
   }
+  Page* page = Page::FromAddress(reinterpret_cast<Address>(object));
+  Address slot = HeapObject::cast(object)->address() + offset;
+  RememberedSet<OLD_TO_NEW>::Insert(page, slot);
 }
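
Note: the rewritten RecordWrite receives the stored value and records a slot in the OLD_TO_NEW remembered set only when an old-space heap object ends up referencing a new-space value; all other writes return early. A hedged, self-contained sketch of that filter (the Address alias, the fake space boundary, and the flat slot set are simplifications, not V8's types):

```cpp
#include <cstdint>
#include <unordered_set>

using Address = uintptr_t;

// Pretend addresses below this boundary are in the young generation.
constexpr Address kNewSpaceEnd = Address{1} << 20;
inline bool InNewSpace(Address a) { return a < kNewSpaceEnd; }

std::unordered_set<Address> old_to_new_slots;  // per-page set in the real heap

// Record object[offset] = value. Only old->new references need an entry:
// the scavenger later walks these slots to find new-space objects that are
// reachable from old space.
void RecordWriteSketch(Address object, int offset, Address value) {
  if (!InNewSpace(value)) return;  // target is old: no remembered slot needed
  if (InNewSpace(object)) return;  // young objects are fully scanned anyway
  old_to_new_slots.insert(object + offset);
}
```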
 
 
@@ -467,7 +459,7 @@
   }
 }
 
-
+template <Heap::FindMementoMode mode>
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
   // the last word of the memento is on another page we return
@@ -476,61 +468,77 @@
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
   if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
-    return NULL;
+    return nullptr;
   }
-
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
   Map* candidate_map = candidate->map();
   // This fast check may peek at an uninitialized word. However, the slow check
   // below (memento_address == top) ensures that this is safe. Mark the word as
   // initialized to silence MemorySanitizer warnings.
   MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
-  if (candidate_map != allocation_memento_map()) return NULL;
+  if (candidate_map != allocation_memento_map()) {
+    return nullptr;
+  }
+  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
 
-  // Either the object is the last object in the new space, or there is another
-  // object of at least word size (the header map word) following it, so
-  // suffices to compare ptr and top here. Note that technically we do not have
-  // to compare with the current top pointer of the from space page during GC,
-  // since we always install filler objects above the top pointer of a from
-  // space page when performing a garbage collection. However, always performing
-  // the test makes it possible to have a single, unified version of
-  // FindAllocationMemento that is used both by the GC and the mutator.
-  Address top = NewSpaceTop();
-  DCHECK(memento_address == top ||
-         memento_address + HeapObject::kHeaderSize <= top ||
-         !NewSpacePage::OnSamePage(memento_address, top - 1));
-  if (memento_address == top) return NULL;
-
-  AllocationMemento* memento = AllocationMemento::cast(candidate);
-  if (!memento->IsValid()) return NULL;
-  return memento;
+  // Depending on what the memento is used for, we might need to perform
+  // additional checks.
+  Address top;
+  switch (mode) {
+    case Heap::kForGC:
+      return memento_candidate;
+    case Heap::kForRuntime:
+      if (memento_candidate == nullptr) return nullptr;
+      // Either the object is the last object in the new space, or there is
+      // another object of at least word size (the header map word) following
+      // it, so it suffices to compare ptr and top here.
+      top = NewSpaceTop();
+      DCHECK(memento_address == top ||
+             memento_address + HeapObject::kHeaderSize <= top ||
+             !NewSpacePage::OnSamePage(memento_address, top - 1));
+      if ((memento_address != top) && memento_candidate->IsValid()) {
+        return memento_candidate;
+      }
+      return nullptr;
+    default:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
-
+template <Heap::UpdateAllocationSiteMode mode>
 void Heap::UpdateAllocationSite(HeapObject* object,
                                 HashMap* pretenuring_feedback) {
   DCHECK(InFromSpace(object));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;
-  AllocationMemento* memento = FindAllocationMemento(object);
-  if (memento == nullptr) return;
+  AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
+  if (memento_candidate == nullptr) return;
 
-  AllocationSite* key = memento->GetAllocationSite();
-  DCHECK(!key->IsZombie());
-
-  if (pretenuring_feedback == global_pretenuring_feedback_) {
+  if (mode == kGlobal) {
+    DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
+    // The global pretenuring feedback is only entered from the scavenger,
+    // where we are allowed to actually touch the allocation site.
+    if (!memento_candidate->IsValid()) return;
+    AllocationSite* site = memento_candidate->GetAllocationSite();
+    DCHECK(!site->IsZombie());
     // For inserting in the global pretenuring storage we need to first
     // increment the memento found count on the allocation site.
-    if (key->IncrementMementoFoundCount()) {
-      global_pretenuring_feedback_->LookupOrInsert(
-          key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+    if (site->IncrementMementoFoundCount()) {
+      global_pretenuring_feedback_->LookupOrInsert(site,
+                                                   ObjectHash(site->address()));
     }
   } else {
-    // Any other pretenuring storage than the global one is used as a cache,
-    // where the count is later on merge in the allocation site.
-    HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
-        key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+    DCHECK_EQ(mode, kCached);
+    DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
+    // Cached feedback is used in the parallel case. We are not allowed to
+    // dereference the allocation site here and must postpone all checks
+    // until the data is actually merged.
+    Address key = memento_candidate->GetAllocationSiteUnchecked();
+    HashMap::Entry* e =
+        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
     DCHECK(e != nullptr);
     (*bit_cast<intptr_t*>(&e->value))++;
   }
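
Note: FindAllocationMemento and UpdateAllocationSite are now parameterized on compile-time mode enums (kForGC/kForRuntime and kGlobal/kCached), so each caller gets a specialized body with the other mode's checks folded away. A small sketch of the dispatch technique (names illustrative):

```cpp
#include <cstdio>

enum class Mode { kForGC, kForRuntime };

// Because mode is a template parameter, the switch below is resolved at
// compile time and each instantiation carries only its own branch.
template <Mode mode>
int FindSketch(int candidate, int top) {
  switch (mode) {
    case Mode::kForGC:
      return candidate;  // GC installs fillers above top, so skip the check
    case Mode::kForRuntime:
      return candidate == top ? -1 : candidate;  // runtime must validate
  }
  return -1;
}

int main() {
  printf("%d %d\n", FindSketch<Mode::kForGC>(5, 5),
         FindSketch<Mode::kForRuntime>(5, 5));  // prints "5 -1"
}
```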
@@ -614,9 +622,18 @@
 #endif
 }
 
+// static
+int DescriptorLookupCache::Hash(Object* source, Name* name) {
+  DCHECK(name->IsUniqueName());
+  // Uses only lower 32 bits if pointers are larger.
+  uint32_t source_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+      kPointerSizeLog2;
+  uint32_t name_hash = name->hash_field();
+  return (source_hash ^ name_hash) % kLength;
+}
 
 int DescriptorLookupCache::Lookup(Map* source, Name* name) {
-  if (!name->IsUniqueName()) return kAbsent;
   int index = Hash(source, name);
   Key& key = keys_[index];
   if ((key.source == source) && (key.name == name)) return results_[index];
@@ -626,13 +643,11 @@
 
 void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
   DCHECK(result != kAbsent);
-  if (name->IsUniqueName()) {
-    int index = Hash(source, name);
-    Key& key = keys_[index];
-    key.source = source;
-    key.name = name;
-    results_[index] = result;
-  }
+  int index = Hash(source, name);
+  Key& key = keys_[index];
+  key.source = source;
+  key.name = name;
+  results_[index] = result;
 }
 
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 84b3c79..1c9be1a 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -27,6 +27,7 @@
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
+#include "src/heap/remembered-set.h"
 #include "src/heap/scavenge-job.h"
 #include "src/heap/scavenger-inl.h"
 #include "src/heap/store-buffer.h"
@@ -37,6 +38,7 @@
 #include "src/snapshot/natives.h"
 #include "src/snapshot/serialize.h"
 #include "src/snapshot/snapshot.h"
+#include "src/tracing/trace-event.h"
 #include "src/type-feedback-vector.h"
 #include "src/utils.h"
 #include "src/v8.h"
@@ -53,10 +55,10 @@
   StrongRootsList* next;
 };
 
-class IdleScavengeObserver : public InlineAllocationObserver {
+class IdleScavengeObserver : public AllocationObserver {
  public:
   IdleScavengeObserver(Heap& heap, intptr_t step_size)
-      : InlineAllocationObserver(step_size), heap_(heap) {}
+      : AllocationObserver(step_size), heap_(heap) {}
 
   void Step(int bytes_allocated, Address, size_t) override {
     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
@@ -77,7 +79,6 @@
       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
-      target_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
@@ -94,7 +95,6 @@
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_space_(NULL),
       code_space_(NULL),
@@ -114,7 +114,6 @@
       old_gen_exhausted_(false),
       optimize_for_memory_usage_(false),
       inline_allocation_disabled_(false),
-      store_buffer_rebuilder_(store_buffer()),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
       high_survival_rate_period_length_(0),
@@ -454,8 +453,6 @@
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  store_buffer()->GCPrologue();
-
   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
   }
@@ -467,6 +464,7 @@
   }
   CheckNewSpaceExpansionCriteria();
   UpdateNewSpaceAllocationCounter();
+  store_buffer()->MoveEntriesToRememberedSet();
 }
 
 
@@ -519,17 +517,19 @@
     if (map_word.IsForwardingAddress()) {
       site = AllocationSite::cast(map_word.ToForwardingAddress());
     }
-    DCHECK(site->IsAllocationSite());
+
+    // We have not validated the allocation site yet, since we did not
+    // dereference the site while collecting the feedback.
+    // This is an inlined check of AllocationMemento::IsValid.
+    if (!site->IsAllocationSite() || site->IsZombie()) continue;
+
     int value =
         static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
     DCHECK_GT(value, 0);
 
-    {
-      // TODO(mlippautz): For parallel processing we need synchronization here.
-      if (site->IncrementMementoFoundCount(value)) {
-        global_pretenuring_feedback_->LookupOrInsert(
-            site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
-      }
+    if (site->IncrementMementoFoundCount(value)) {
+      global_pretenuring_feedback_->LookupOrInsert(site,
+                                                   ObjectHash(site->address()));
     }
   }
 }
@@ -567,22 +567,24 @@
     bool maximum_size_scavenge = MaximumSizeScavenge();
     for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
          e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+      allocation_sites++;
       site = reinterpret_cast<AllocationSite*>(e->key);
       int found_count = site->memento_found_count();
-      // The fact that we have an entry in the storage means that we've found
-      // the site at least once.
-      DCHECK_GT(found_count, 0);
-      DCHECK(site->IsAllocationSite());
-      allocation_sites++;
-      active_allocation_sites++;
-      allocation_mementos_found += found_count;
-      if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
-        trigger_deoptimization = true;
-      }
-      if (site->GetPretenureMode() == TENURED) {
-        tenure_decisions++;
-      } else {
-        dont_tenure_decisions++;
+      // An entry in the storage does not imply that the count is > 0 because
+      // allocation sites might have been reset due to too many objects dying
+      // in old space.
+      if (found_count > 0) {
+        DCHECK(site->IsAllocationSite());
+        active_allocation_sites++;
+        allocation_mementos_found += found_count;
+        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+          trigger_deoptimization = true;
+        }
+        if (site->GetPretenureMode() == TENURED) {
+          tenure_decisions++;
+        } else {
+          dont_tenure_decisions++;
+        }
       }
     }
 
@@ -639,8 +641,6 @@
 
 
 void Heap::GarbageCollectionEpilogue() {
-  store_buffer()->GCEpilogue();
-
   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
@@ -769,8 +769,7 @@
       if (!maybe_code->IsCode()) break;
       Code* code = Code::cast(maybe_code);
       int offset = Smi::cast(elements->get(j + 3))->value();
-      Address pc = code->address() + offset;
-      int pos = code->SourcePosition(pc);
+      int pos = code->SourcePosition(offset);
       elements->set(j + 2, Smi::FromInt(pos));
     }
   }
@@ -819,6 +818,7 @@
   GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
   HistogramTimerScope incremental_marking_scope(
       isolate()->counters()->gc_incremental_marking_finalize());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
 
   {
     GCCallbacksScope scope(this);
@@ -860,7 +860,6 @@
   }
 }
 
-
 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
@@ -896,7 +895,7 @@
   const int kMinNumberOfAttempts = 2;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
-                        v8::kGCCallbackFlagForced) &&
+                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
         attempt + 1 >= kMinNumberOfAttempts) {
       break;
     }
@@ -1008,7 +1007,9 @@
     GarbageCollectionPrologue();
 
     {
-      HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));
+      HistogramTimer* gc_type_timer = GCTypeTimer(collector);
+      HistogramTimerScope histogram_timer_scope(gc_type_timer);
+      TRACE_EVENT0("v8", gc_type_timer->name());
 
       next_gc_likely_to_collect_more =
           PerformGarbageCollection(collector, gc_callback_flags);
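
Note: TRACE_EVENT0, added alongside the existing histogram timers here and below, is a scoped macro: it marks the beginning of the enclosing scope and emits the matching end event when the scope exits. A minimal RAII analogue (a sketch only, not the real tracing backend):

```cpp
#include <chrono>
#include <cstdio>

// Hypothetical stand-in for a scoped trace event: records the start time on
// construction and logs the elapsed time when the scope is left.
class ScopedTraceSketch {
 public:
  explicit ScopedTraceSketch(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTraceSketch() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s took %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

#define TRACE_SCOPE_SKETCH(name) ScopedTraceSketch trace_scope_sketch(name)

void FinalizeSketch() {
  TRACE_SCOPE_SKETCH("V8.GCIncrementalMarkingFinalize");
  // ... the work measured by the scope above ...
}
```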
@@ -1042,7 +1043,8 @@
   }
 
   if (collector == MARK_COMPACTOR &&
-      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
+      (gc_callback_flags & (kGCCallbackFlagForced |
+                            kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }
 
@@ -1062,9 +1064,9 @@
     tracer()->ResetSurvivalEvents();
     old_generation_size_configured_ = false;
     MemoryReducer::Event event;
-    event.type = MemoryReducer::kContextDisposed;
+    event.type = MemoryReducer::kPossibleGarbage;
     event.time_ms = MonotonicallyIncreasingTimeInMs();
-    memory_reducer_->NotifyContextDisposed(event);
+    memory_reducer_->NotifyPossibleGarbage(event);
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
@@ -1103,10 +1105,8 @@
   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
   if (!InNewSpace(array)) {
     for (int i = 0; i < len; i++) {
-      // TODO(hpayer): check store buffer for entries
-      if (InNewSpace(dst_objects[i])) {
-        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
-      }
+      RecordWrite(array, array->OffsetOfElementAt(dst_index + i),
+                  dst_objects[i]);
     }
   }
   incremental_marking()->RecordWrites(array);
@@ -1420,7 +1420,7 @@
 
 
 void Heap::MarkCompact() {
-  PauseInlineAllocationObserversScope pause_observers(new_space());
+  PauseAllocationObserversScope pause_observers(this);
 
   gc_state_ = MARK_COMPACT;
   LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1552,12 +1552,6 @@
 }
 
 
-void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
-                                       StoreBufferEvent event) {
-  heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
 void PromotionQueue::Initialize() {
   // The last to-space page may be used for promotion queue. On promotion
   // conflict, we use the emergency stack.
@@ -1627,7 +1621,7 @@
 
   // Bump-pointer allocations done during scavenge are not real allocations.
   // Pause the inline allocation steps.
-  PauseInlineAllocationObserversScope pause_observers(new_space());
+  PauseAllocationObserversScope pause_observers(this);
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
@@ -1638,9 +1632,6 @@
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
-  // Clear descriptor cache.
-  isolate_->descriptor_lookup_cache()->Clear();
-
   // Used for updating survived_since_last_expansion_ at function end.
   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
 
@@ -1690,9 +1681,8 @@
     // Copy objects reachable from the old generation.
     GCTracer::Scope gc_scope(tracer(),
                              GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(this, store_buffer(),
-                                  &ScavengeStoreBufferCallback);
-    store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
+    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
+                                                  Scavenger::ScavengeObject);
   }
 
   {
@@ -1946,8 +1936,6 @@
 
     // Promote and process all the to-be-promoted objects.
     {
-      StoreBufferRebuildScope scope(this, store_buffer(),
-                                    &ScavengeStoreBufferCallback);
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
         int size;
@@ -2099,6 +2087,7 @@
   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
 
+  isolate()->counters()->maps_created()->Increment();
   result->set_map_no_write_barrier(meta_map());
   Map* map = Map::cast(result);
   map->set_instance_type(instance_type);
@@ -2271,6 +2260,7 @@
   if (FLAG_unbox_double_fields) {
     null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
   }
+  null_map()->set_is_undetectable();
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -2281,6 +2271,7 @@
 
   undefined_map()->set_prototype(null_value());
   undefined_map()->set_constructor_or_backpointer(null_value());
+  undefined_map()->set_is_undetectable();
 
   null_map()->set_prototype(null_value());
   null_map()->set_constructor_or_backpointer(null_value());
@@ -2415,14 +2406,6 @@
       ByteArray* byte_array;
       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
       set_empty_byte_array(byte_array);
-
-      BytecodeArray* bytecode_array = nullptr;
-      AllocationResult allocation =
-          AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
-      if (!allocation.To(&bytecode_array)) {
-        return false;
-      }
-      set_empty_bytecode_array(bytecode_array);
     }
 
 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
@@ -2664,7 +2647,7 @@
   set_arguments_marker(
       *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
                            handle(Smi::FromInt(-4), isolate()), "undefined",
-                           Oddball::kArgumentMarker));
+                           Oddball::kArgumentsMarker));
 
   set_no_interceptor_result_sentinel(*factory->NewOddball(
       factory->no_interceptor_result_sentinel_map(),
@@ -2685,17 +2668,6 @@
     roots_[constant_string_table[i].index] = *str;
   }
 
-  // The {hidden_string} is special because it is an empty string, but does not
-  // match any string (even the {empty_string}) when looked up in properties.
-  // Allocate the hidden string which is used to identify the hidden properties
-  // in JSObjects. The hash code has a special value so that it will not match
-  // the empty string when searching for the property. It cannot be part of the
-  // loop above because it needs to be allocated manually with the special
-  // hash code in place. The hash code for the hidden_string is zero to ensure
-  // that it will always be at the first entry in property descriptors.
-  set_hidden_string(*factory->NewOneByteInternalizedString(
-      OneByteVector("", 0), String::kEmptyStringHash));
-
   // Create the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
@@ -2724,6 +2696,14 @@
 #undef SYMBOL_INIT
   }
 
+  // The {hidden_properties_symbol} is special because it is the only name with
+  // hash code zero. This ensures that it will always be the first entry as
+  // sorted by hash code in descriptor arrays. It is used to identify the hidden
+  // properties in JSObjects.
+  // kIsNotArrayIndexMask is a computed hash with value zero.
+  Symbol::cast(roots_[khidden_properties_symbolRootIndex])
+      ->set_hash_field(Name::kIsNotArrayIndexMask);
+
   {
     HandleScope scope(isolate());
 #define SYMBOL_INIT(name, description)                                      \
@@ -2872,15 +2852,14 @@
   cell->set_value(the_hole_value());
   set_empty_property_cell(*cell);
 
+  Handle<PropertyCell> species_cell = factory->NewPropertyCell();
+  species_cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  set_species_protector(*species_cell);
+
   set_weak_stack_trace_list(Smi::FromInt(0));
 
   set_noscript_shared_function_infos(Smi::FromInt(0));
 
-  // Will be filled in by Interpreter::Initialize().
-  set_interpreter_table(
-      *interpreter::Interpreter::CreateUninitializedInterpreterTable(
-          isolate()));
-
   // Initialize keyed lookup cache.
   isolate_->keyed_lookup_cache()->Clear();
 
@@ -3055,7 +3034,10 @@
   instance->set_length(length);
   instance->set_frame_size(frame_size);
   instance->set_parameter_count(parameter_count);
+  instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
   instance->set_constant_pool(constant_pool);
+  instance->set_handler_table(empty_fixed_array());
+  instance->set_source_position_table(empty_fixed_array());
   CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
 
   return result;
@@ -3087,6 +3069,9 @@
 bool Heap::CanMoveObjectStart(HeapObject* object) {
   if (!FLAG_move_object_start) return false;
 
+  // Sampling heap profiler may have a reference to the object.
+  if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
+
   Address address = object->address();
 
   if (lo_space()->Contains(object)) return false;
@@ -3098,7 +3083,7 @@
   // (3) the page was already concurrently swept. This case is an optimization
   // for concurrent sweeping. The WasSwept predicate for concurrently swept
   // pages is set after sweeping all pages.
-  return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
+  return !InOldSpace(object) || page->SweepingDone();
 }
 
 
@@ -3133,6 +3118,10 @@
   DCHECK(!lo_space()->Contains(object));
   DCHECK(object->map() != fixed_cow_array_map());
 
+  // Ensure that no handle scope holds more than one pointer to the same
+  // backing store.
+  SLOW_DCHECK(CountHandlesForObject(object) <= 1);
+
   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
@@ -3161,6 +3150,11 @@
 
   // Maintain consistency of live bytes during incremental marking
   Marking::TransferMark(this, object->address(), new_start);
+  if (mark_compact_collector()->sweeping_in_progress()) {
+    // Array trimming during sweeping can add invalid slots in free list.
+    ClearRecordedSlotRange(object, former_start,
+                           HeapObject::RawField(new_object, 0));
+  }
   AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
 
   // Notify the heap profiler of change in object layout.
@@ -3210,7 +3204,8 @@
   }
 
   // Calculate location of new array end.
-  Address new_end = object->address() + object->Size() - bytes_to_trim;
+  Address old_end = object->address() + object->Size();
+  Address new_end = old_end - bytes_to_trim;
 
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
@@ -3220,6 +3215,11 @@
   // of the object changed significantly.
   if (!lo_space()->Contains(object)) {
     CreateFillerObjectAt(new_end, bytes_to_trim);
+    if (mark_compact_collector()->sweeping_in_progress()) {
+      // Array trimming during sweeping can add invalid slots in free list.
+      ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
+                             reinterpret_cast<Object**>(old_end));
+    }
   }
 
   // Initialize header of the trimmed array. We are storing the new length
@@ -3366,6 +3366,25 @@
   return new_code;
 }
 
+AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
+  int size = BytecodeArray::SizeFor(bytecode_array->length());
+  HeapObject* result = nullptr;
+  {
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(bytecode_array_map());
+  BytecodeArray* copy = BytecodeArray::cast(result);
+  copy->set_length(bytecode_array->length());
+  copy->set_frame_size(bytecode_array->frame_size());
+  copy->set_parameter_count(bytecode_array->parameter_count());
+  copy->set_constant_pool(bytecode_array->constant_pool());
+  copy->set_handler_table(bytecode_array->handler_table());
+  copy->set_source_position_table(bytecode_array->source_position_table());
+  bytecode_array->CopyBytecodesTo(copy);
+  return copy;
+}
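
Note: the new CopyBytecodeArray follows the allocation idiom used throughout this file: AllocateRaw yields an AllocationResult that either converts into the object via To() or is returned unchanged so the caller can trigger a GC and retry. A simplified stand-in for that result type (all names hypothetical):

```cpp
// Sketch of the "if (!allocation.To(&result)) return allocation;" idiom:
// either the allocation succeeded and we get a typed pointer, or the failed
// result is propagated upward untouched.
struct AllocationResultSketch {
  void* object = nullptr;

  static AllocationResultSketch Failure() { return {}; }
  static AllocationResultSketch Of(void* obj) { return {obj}; }

  template <typename T>
  bool To(T** out) const {
    if (object == nullptr) return false;
    *out = static_cast<T*>(object);
    return true;
  }
};

AllocationResultSketch CopySketch(AllocationResultSketch raw) {
  char* result = nullptr;
  if (!raw.To(&result)) return raw;  // propagate the failed allocation
  // ... initialize the copy field by field, as CopyBytecodeArray does ...
  return AllocationResultSketch::Of(result);
}
```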
 
 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
   // Allocate ByteArray before the Code object, so that we do not risk
@@ -3470,7 +3489,6 @@
   if (start_offset == map->instance_size()) return;
   DCHECK_LT(start_offset, map->instance_size());
 
-  Object* filler;
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
@@ -3480,15 +3498,17 @@
 
   // In case of Array subclassing the |map| could already be transitioned
   // to different elements kind from the initial map on which we track slack.
-  Map* initial_map = map->FindRootMap();
-  if (initial_map->IsInobjectSlackTrackingInProgress()) {
-    // We might want to shrink the object later.
-    filler = Heap::one_pointer_filler_map();
+  bool in_progress = map->IsInobjectSlackTrackingInProgress();
+  Object* filler;
+  if (in_progress) {
+    filler = one_pointer_filler_map();
   } else {
-    filler = Heap::undefined_value();
+    filler = undefined_value();
   }
   obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
-  initial_map->InobjectSlackTrackingStep();
+  if (in_progress) {
+    map->FindRootMap()->InobjectSlackTrackingStep();
+  }
 }
 
 
@@ -3513,7 +3533,8 @@
 
   // Initialize the JSObject.
   InitializeJSObjectFromMap(js_obj, properties, map);
-  DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements());
+  DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
+         js_obj->HasFastStringWrapperElements());
   return js_obj;
 }
 
@@ -3804,18 +3825,41 @@
     AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
     if (!allocation.To(&obj)) return allocation;
   }
+
   obj->set_map_no_write_barrier(fixed_array_map());
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(new_len);
 
   // Copy the content.
   DisallowHeapAllocation no_gc;
-  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
   MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
   return result;
 }
 
+AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
+                                          PretenureFlag pretenure) {
+  if (new_len == 0) return empty_fixed_array();
+
+  DCHECK_LE(new_len, src->length());
+
+  HeapObject* obj = nullptr;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(fixed_array_map());
+
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(new_len);
+
+  // Copy the content.
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
+  return result;
+}
 
 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   int len = src->length();
@@ -3824,13 +3868,12 @@
     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
   }
+  obj->set_map_no_write_barrier(map);
   if (InNewSpace(obj)) {
-    obj->set_map_no_write_barrier(map);
     CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  obj->set_map_no_write_barrier(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
@@ -4097,6 +4140,20 @@
   return committed - used > used + kSlack;
 }
 
+void Heap::SetOptimizeForMemoryUsage() {
+  // Activate the memory reducer when switching to the background if
+  // - there has been no mark-compact since start-up, and
+  // - the committed memory can potentially be reduced.
+  // 2 pages each for the old, code, and map spaces, plus 1 page for new space.
+  const int kMinCommittedMemory = 7 * Page::kPageSize;
+  if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory) {
+    MemoryReducer::Event event;
+    event.type = MemoryReducer::kPossibleGarbage;
+    event.time_ms = MonotonicallyIncreasingTimeInMs();
+    memory_reducer_->NotifyPossibleGarbage(event);
+  }
+  optimize_for_memory_usage_ = true;
+}
 
 void Heap::ReduceNewSpaceSize() {
   // TODO(ulan): Unify this constant with the similar constant in
@@ -4189,6 +4246,7 @@
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
+      TRACE_EVENT0("v8", "V8.GCContext");
       CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
       break;
     }
@@ -4274,6 +4332,7 @@
       static_cast<double>(base::Time::kMillisecondsPerSecond);
   HistogramTimerScope idle_notification_scope(
       isolate_->counters()->gc_idle_notification());
+  TRACE_EVENT0("v8", "V8.GCIdleNotification");
   double start_ms = MonotonicallyIncreasingTimeInMs();
   double idle_time_in_ms = deadline_in_ms - start_ms;
 
@@ -4354,38 +4413,65 @@
 
 #endif  // DEBUG
 
-bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
-
-
-bool Heap::Contains(Address addr) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+bool Heap::Contains(HeapObject* value) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+    return false;
+  }
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContains(addr) || old_space_->Contains(addr) ||
-          code_space_->Contains(addr) || map_space_->Contains(addr) ||
-          lo_space_->SlowContains(addr));
+         (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+          code_space_->Contains(value) || map_space_->Contains(value) ||
+          lo_space_->Contains(value));
 }
 
+bool Heap::ContainsSlow(Address addr) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+    return false;
+  }
+  return HasBeenSetUp() &&
+         (new_space_.ToSpaceContainsSlow(addr) ||
+          old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
+          map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
+}
 
 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
-  return InSpace(value->address(), space);
-}
-
-
-bool Heap::InSpace(Address addr, AllocationSpace space) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+    return false;
+  }
   if (!HasBeenSetUp()) return false;
 
   switch (space) {
     case NEW_SPACE:
-      return new_space_.ToSpaceContains(addr);
+      return new_space_.ToSpaceContains(value);
     case OLD_SPACE:
-      return old_space_->Contains(addr);
+      return old_space_->Contains(value);
     case CODE_SPACE:
-      return code_space_->Contains(addr);
+      return code_space_->Contains(value);
     case MAP_SPACE:
-      return map_space_->Contains(addr);
+      return map_space_->Contains(value);
     case LO_SPACE:
-      return lo_space_->SlowContains(addr);
+      return lo_space_->Contains(value);
+  }
+  UNREACHABLE();
+  return false;
+}
+
+bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+    return false;
+  }
+  if (!HasBeenSetUp()) return false;
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_.ToSpaceContainsSlow(addr);
+    case OLD_SPACE:
+      return old_space_->ContainsSlow(addr);
+    case CODE_SPACE:
+      return code_space_->ContainsSlow(addr);
+    case MAP_SPACE:
+      return map_space_->ContainsSlow(addr);
+    case LO_SPACE:
+      return lo_space_->ContainsSlow(addr);
   }
   UNREACHABLE();
   return false;
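
Note: the containment predicates are split: the HeapObject* overloads stay cheap, while the address-based variants are renamed ContainsSlow/InSpaceSlow so that their per-page scanning cost is visible at every call site. A sketch of the fast/slow distinction (page size, alignment, and the page header are illustrative, not V8's actual layout):

```cpp
#include <cstdint>
#include <vector>

constexpr uintptr_t kPageSize = uintptr_t{1} << 19;
constexpr uintptr_t kPageMask = ~(kPageSize - 1);

struct PageSketch {
  int owner_space;  // which space allocated this page
};

// Fast: valid only for addresses known to point into some page. Aligned
// pages let us find the page header by masking the address, in O(1).
inline bool ContainsFast(uintptr_t object_addr, int space) {
  auto* page = reinterpret_cast<const PageSketch*>(object_addr & kPageMask);
  return page->owner_space == space;
}

// Slow: safe for arbitrary addresses, but O(number of pages), as the name
// now advertises.
inline bool ContainsSlow(const std::vector<uintptr_t>& page_starts,
                         uintptr_t addr) {
  for (uintptr_t start : page_starts)
    if (addr >= start && addr < start + kPageSize) return true;
  return false;
}
```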
@@ -4429,8 +4515,6 @@
   CHECK(HasBeenSetUp());
   HandleScope scope(isolate());
 
-  store_buffer()->Verify();
-
   if (mark_compact_collector()->sweeping_in_progress()) {
     // We have to wait here for the sweeper threads to have an iterable heap.
     mark_compact_collector()->EnsureSweepingCompleted();
@@ -4478,14 +4562,11 @@
                                              Address end, bool record_slots,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
+  Page* page = Page::FromAddress(start);
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
     Object* target = *slot;
-    // If the store buffer becomes overfull we mark pages as being exempt from
-    // the store buffer.  These pages are scanned to find pointers that point
-    // to the new space.  In that case we may hit newly promoted objects and
-    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
     if (target->IsHeapObject()) {
       if (Heap::InFromSpace(target)) {
         callback(reinterpret_cast<HeapObject**>(slot),
@@ -4494,8 +4575,7 @@
         if (InNewSpace(new_target)) {
           SLOW_DCHECK(Heap::InToSpace(new_target));
           SLOW_DCHECK(new_target->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
+          RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
         }
         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
       } else if (record_slots &&
@@ -4590,10 +4670,6 @@
   Relocatable::Iterate(isolate_, v);
   v->Synchronize(VisitorSynchronization::kRelocatable);
 
-  if (isolate_->deoptimizer_data() != NULL) {
-    isolate_->deoptimizer_data()->Iterate(v);
-  }
-  v->Synchronize(VisitorSynchronization::kDebug);
   isolate_->compilation_cache()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kCompilationCache);
 
@@ -4607,8 +4683,10 @@
   // on scavenge collections.
   if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
+    v->Synchronize(VisitorSynchronization::kBuiltins);
+    isolate_->interpreter()->IterateDispatchTable(v);
+    v->Synchronize(VisitorSynchronization::kDispatchTable);
   }
-  v->Synchronize(VisitorSynchronization::kBuiltins);
 
   // Iterate over global handles.
   switch (mode) {
@@ -4746,31 +4824,6 @@
 
   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
 
-  if (FLAG_target_semi_space_size > 0) {
-    int target_semispace_size = FLAG_target_semi_space_size * MB;
-    if (target_semispace_size < initial_semispace_size_) {
-      target_semispace_size_ = initial_semispace_size_;
-      if (FLAG_trace_gc) {
-        PrintIsolate(isolate_,
-                     "Target semi-space size cannot be less than the minimum "
-                     "semi-space size of %d MB\n",
-                     initial_semispace_size_ / MB);
-      }
-    } else if (target_semispace_size > max_semi_space_size_) {
-      target_semispace_size_ = max_semi_space_size_;
-      if (FLAG_trace_gc) {
-        PrintIsolate(isolate_,
-                     "Target semi-space size cannot be less than the maximum "
-                     "semi-space size of %d MB\n",
-                     max_semi_space_size_ / MB);
-      }
-    } else {
-      target_semispace_size_ = ROUND_UP(target_semispace_size, Page::kPageSize);
-    }
-  }
-
-  target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
-
   if (FLAG_semi_space_growth_factor < 2) {
     FLAG_semi_space_growth_factor = 2;
   }
@@ -5167,7 +5220,7 @@
 
   idle_scavenge_observer_ = new IdleScavengeObserver(
       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
-  new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
+  new_space()->AddAllocationObserver(idle_scavenge_observer_);
 
   return true;
 }
@@ -5267,7 +5320,7 @@
     PrintAlloctionsHash();
   }
 
-  new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+  new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
   delete idle_scavenge_observer_;
   idle_scavenge_observer_ = nullptr;
 
@@ -5476,6 +5529,32 @@
 
 #endif
 
+#ifdef ENABLE_SLOW_DCHECKS
+
+class CountHandleVisitor : public ObjectVisitor {
+ public:
+  explicit CountHandleVisitor(Object* object) : object_(object) {}
+
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** p = start; p < end; p++) {
+      if (object_ == reinterpret_cast<Object*>(*p)) count_++;
+    }
+  }
+
+  int count() { return count_; }
+
+ private:
+  Object* object_;
+  int count_ = 0;
+};
+
+int Heap::CountHandlesForObject(Object* object) {
+  CountHandleVisitor v(object);
+  isolate_->handle_scope_implementer()->Iterate(&v);
+  return v.count();
+}
+#endif
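
Note: CountHandlesForObject is a straightforward use of the ObjectVisitor pattern: the handle-scope implementer feeds every block of handle slots to VisitPointers, and the visitor counts the slots that reference the object of interest. A stripped-down sketch of the same visitor:

```cpp
// Minimal visitor sketch: walk ranges of slots and count how many reference
// one particular object, as the SLOW_DCHECK in LeftTrimFixedArray relies on.
class CountVisitorSketch {
 public:
  explicit CountVisitorSketch(const void* object) : object_(object) {}

  void VisitPointers(const void* const* start, const void* const* end) {
    for (const void* const* p = start; p < end; p++)
      if (*p == object_) count_++;
  }

  int count() const { return count_; }

 private:
  const void* object_;
  int count_ = 0;
};

// Usage: a real handle iterator would call VisitPointers once per block.
// const void* slots[] = {obj, other, obj};
// CountVisitorSketch v(obj);
// v.VisitPointers(slots, slots + 3);  // v.count() == 2
```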
+
 class CheckHandleCountVisitor : public ObjectVisitor {
  public:
   CheckHandleCountVisitor() : handle_count_(0) {}
@@ -5496,6 +5575,27 @@
   isolate_->handle_scope_implementer()->Iterate(&v);
 }
 
+void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
+  if (!InNewSpace(object)) {
+    store_buffer()->MoveEntriesToRememberedSet();
+    Address slot_addr = reinterpret_cast<Address>(slot);
+    Page* page = Page::FromAddress(slot_addr);
+    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+    RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+  }
+}
+
+void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
+                                  Object** end) {
+  if (!InNewSpace(object)) {
+    store_buffer()->MoveEntriesToRememberedSet();
+    Address start_addr = reinterpret_cast<Address>(start);
+    Address end_addr = reinterpret_cast<Address>(end);
+    Page* page = Page::FromAddress(start_addr);
+    DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start_addr, end_addr);
+  }
+}
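
Note: both ClearRecordedSlot and ClearRecordedSlotRange drain the store buffer into the remembered set before removing entries: a stale entry for the cleared range could otherwise still be sitting in the buffer and be merged back in later. A sketch of the drain-then-remove ordering (a flat std::set stands in for the per-page sets):

```cpp
#include <cstdint>
#include <set>

using Address = uintptr_t;

struct RememberedSetSketch {
  std::set<Address> slots;         // merged OLD_TO_NEW entries
  std::set<Address> store_buffer;  // recent writes, not yet merged

  void MoveEntriesToRememberedSet() {
    slots.insert(store_buffer.begin(), store_buffer.end());
    store_buffer.clear();
  }

  // Remove [start, end): drain first so no stale entry can resurface.
  void RemoveRange(Address start, Address end) {
    MoveEntriesToRememberedSet();
    slots.erase(slots.lower_bound(start), slots.lower_bound(end));
  }
};
```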
 
 Space* AllSpaces::next() {
   switch (counter_++) {
@@ -6099,19 +6199,6 @@
 }
 
 
-void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
-  if (chunks_queued_for_free_ == NULL) return;
-  MemoryChunk* next;
-  MemoryChunk* chunk;
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-  }
-  store_buffer()->Compact();
-  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
-}
-
-
 void Heap::FreeQueuedChunks() {
   if (chunks_queued_for_free_ != NULL) {
     if (FLAG_concurrent_sweeping) {
diff --git a/src/heap/heap.h b/src/heap/heap.h
index af9d0a6..4a76777 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -14,6 +14,7 @@
 #include "src/assert-scope.h"
 #include "src/atomic-utils.h"
 #include "src/globals.h"
+#include "src/heap-symbols.h"
 // TODO(mstarzinger): Two more includes to kill!
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
@@ -36,7 +37,6 @@
   V(Oddball, true_value, TrueValue)                                            \
   V(Oddball, false_value, FalseValue)                                          \
   V(String, empty_string, empty_string)                                        \
-  V(String, hidden_string, hidden_string)                                      \
   V(Oddball, uninitialized_value, UninitializedValue)                          \
   V(Map, cell_map, CellMap)                                                    \
   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
@@ -187,11 +187,9 @@
   V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
   V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
   V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
-  V(FixedArray, interpreter_table, InterpreterTable)                           \
   V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
   V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
-  V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
-
+  V(PropertyCell, species_protector, SpeciesProtector)
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                   \
@@ -209,196 +207,6 @@
   SMI_ROOT_LIST(V)    \
   V(StringTable, string_table, StringTable)
 
-#define INTERNALIZED_STRING_LIST(V)                              \
-  V(anonymous_string, "anonymous")                               \
-  V(apply_string, "apply")                                       \
-  V(assign_string, "assign")                                     \
-  V(arguments_string, "arguments")                               \
-  V(Arguments_string, "Arguments")                               \
-  V(Array_string, "Array")                                       \
-  V(bind_string, "bind")                                         \
-  V(bool16x8_string, "bool16x8")                                 \
-  V(Bool16x8_string, "Bool16x8")                                 \
-  V(bool32x4_string, "bool32x4")                                 \
-  V(Bool32x4_string, "Bool32x4")                                 \
-  V(bool8x16_string, "bool8x16")                                 \
-  V(Bool8x16_string, "Bool8x16")                                 \
-  V(boolean_string, "boolean")                                   \
-  V(Boolean_string, "Boolean")                                   \
-  V(bound__string, "bound ")                                     \
-  V(byte_length_string, "byteLength")                            \
-  V(byte_offset_string, "byteOffset")                            \
-  V(call_string, "call")                                         \
-  V(callee_string, "callee")                                     \
-  V(caller_string, "caller")                                     \
-  V(cell_value_string, "%cell_value")                            \
-  V(char_at_string, "CharAt")                                    \
-  V(closure_string, "(closure)")                                 \
-  V(compare_ic_string, "==")                                     \
-  V(configurable_string, "configurable")                         \
-  V(constructor_string, "constructor")                           \
-  V(construct_string, "construct")                               \
-  V(create_string, "create")                                     \
-  V(Date_string, "Date")                                         \
-  V(default_string, "default")                                   \
-  V(defineProperty_string, "defineProperty")                     \
-  V(deleteProperty_string, "deleteProperty")                     \
-  V(display_name_string, "displayName")                          \
-  V(done_string, "done")                                         \
-  V(dot_result_string, ".result")                                \
-  V(dot_string, ".")                                             \
-  V(enumerable_string, "enumerable")                             \
-  V(enumerate_string, "enumerate")                               \
-  V(Error_string, "Error")                                       \
-  V(eval_string, "eval")                                         \
-  V(false_string, "false")                                       \
-  V(float32x4_string, "float32x4")                               \
-  V(Float32x4_string, "Float32x4")                               \
-  V(for_api_string, "for_api")                                   \
-  V(for_string, "for")                                           \
-  V(function_string, "function")                                 \
-  V(Function_string, "Function")                                 \
-  V(Generator_string, "Generator")                               \
-  V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
-  V(getPrototypeOf_string, "getPrototypeOf")                     \
-  V(get_string, "get")                                           \
-  V(global_string, "global")                                     \
-  V(has_string, "has")                                           \
-  V(illegal_access_string, "illegal access")                     \
-  V(illegal_argument_string, "illegal argument")                 \
-  V(index_string, "index")                                       \
-  V(infinity_string, "Infinity")                                 \
-  V(input_string, "input")                                       \
-  V(int16x8_string, "int16x8")                                   \
-  V(Int16x8_string, "Int16x8")                                   \
-  V(int32x4_string, "int32x4")                                   \
-  V(Int32x4_string, "Int32x4")                                   \
-  V(int8x16_string, "int8x16")                                   \
-  V(Int8x16_string, "Int8x16")                                   \
-  V(isExtensible_string, "isExtensible")                         \
-  V(isView_string, "isView")                                     \
-  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")         \
-  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")       \
-  V(last_index_string, "lastIndex")                              \
-  V(length_string, "length")                                     \
-  V(Map_string, "Map")                                           \
-  V(minus_infinity_string, "-Infinity")                          \
-  V(minus_zero_string, "-0")                                     \
-  V(name_string, "name")                                         \
-  V(nan_string, "NaN")                                           \
-  V(next_string, "next")                                         \
-  V(null_string, "null")                                         \
-  V(null_to_string, "[object Null]")                             \
-  V(number_string, "number")                                     \
-  V(Number_string, "Number")                                     \
-  V(object_string, "object")                                     \
-  V(Object_string, "Object")                                     \
-  V(ownKeys_string, "ownKeys")                                   \
-  V(preventExtensions_string, "preventExtensions")               \
-  V(private_api_string, "private_api")                           \
-  V(Promise_string, "Promise")                                   \
-  V(proto_string, "__proto__")                                   \
-  V(prototype_string, "prototype")                               \
-  V(Proxy_string, "Proxy")                                       \
-  V(query_colon_string, "(?:)")                                  \
-  V(RegExp_string, "RegExp")                                     \
-  V(setPrototypeOf_string, "setPrototypeOf")                     \
-  V(set_string, "set")                                           \
-  V(Set_string, "Set")                                           \
-  V(source_mapping_url_string, "source_mapping_url")             \
-  V(source_string, "source")                                     \
-  V(source_url_string, "source_url")                             \
-  V(stack_string, "stack")                                       \
-  V(strict_compare_ic_string, "===")                             \
-  V(string_string, "string")                                     \
-  V(String_string, "String")                                     \
-  V(symbol_string, "symbol")                                     \
-  V(Symbol_string, "Symbol")                                     \
-  V(this_string, "this")                                         \
-  V(throw_string, "throw")                                       \
-  V(toJSON_string, "toJSON")                                     \
-  V(toString_string, "toString")                                 \
-  V(true_string, "true")                                         \
-  V(uint16x8_string, "uint16x8")                                 \
-  V(Uint16x8_string, "Uint16x8")                                 \
-  V(uint32x4_string, "uint32x4")                                 \
-  V(Uint32x4_string, "Uint32x4")                                 \
-  V(uint8x16_string, "uint8x16")                                 \
-  V(Uint8x16_string, "Uint8x16")                                 \
-  V(undefined_string, "undefined")                               \
-  V(undefined_to_string, "[object Undefined]")                   \
-  V(valueOf_string, "valueOf")                                   \
-  V(value_string, "value")                                       \
-  V(WeakMap_string, "WeakMap")                                   \
-  V(WeakSet_string, "WeakSet")                                   \
-  V(writable_string, "writable")
-
-#define PRIVATE_SYMBOL_LIST(V)              \
-  V(array_iteration_kind_symbol)            \
-  V(array_iterator_next_symbol)             \
-  V(array_iterator_object_symbol)           \
-  V(call_site_function_symbol)              \
-  V(call_site_position_symbol)              \
-  V(call_site_receiver_symbol)              \
-  V(call_site_strict_symbol)                \
-  V(class_end_position_symbol)              \
-  V(class_start_position_symbol)            \
-  V(detailed_stack_trace_symbol)            \
-  V(elements_transition_symbol)             \
-  V(error_end_pos_symbol)                   \
-  V(error_script_symbol)                    \
-  V(error_start_pos_symbol)                 \
-  V(formatted_stack_trace_symbol)           \
-  V(frozen_symbol)                          \
-  V(hash_code_symbol)                       \
-  V(home_object_symbol)                     \
-  V(internal_error_symbol)                  \
-  V(intl_impl_object_symbol)                \
-  V(intl_initialized_marker_symbol)         \
-  V(intl_pattern_symbol)                    \
-  V(intl_resolved_symbol)                   \
-  V(megamorphic_symbol)                     \
-  V(native_context_index_symbol)            \
-  V(nonexistent_symbol)                     \
-  V(nonextensible_symbol)                   \
-  V(normal_ic_symbol)                       \
-  V(not_mapped_symbol)                      \
-  V(observed_symbol)                        \
-  V(premonomorphic_symbol)                  \
-  V(promise_combined_deferred_symbol)       \
-  V(promise_debug_marker_symbol)            \
-  V(promise_has_handler_symbol)             \
-  V(promise_on_resolve_symbol)              \
-  V(promise_on_reject_symbol)               \
-  V(promise_raw_symbol)                     \
-  V(promise_status_symbol)                  \
-  V(promise_value_symbol)                   \
-  V(sealed_symbol)                          \
-  V(stack_trace_symbol)                     \
-  V(strict_function_transition_symbol)      \
-  V(string_iterator_iterated_string_symbol) \
-  V(string_iterator_next_index_symbol)      \
-  V(strong_function_transition_symbol)      \
-  V(uninitialized_symbol)
-
-#define PUBLIC_SYMBOL_LIST(V)                \
-  V(has_instance_symbol, Symbol.hasInstance) \
-  V(iterator_symbol, Symbol.iterator)        \
-  V(match_symbol, Symbol.match)              \
-  V(replace_symbol, Symbol.replace)          \
-  V(search_symbol, Symbol.search)            \
-  V(species_symbol, Symbol.species)          \
-  V(split_symbol, Symbol.split)              \
-  V(to_primitive_symbol, Symbol.toPrimitive) \
-  V(unscopables_symbol, Symbol.unscopables)
-
-// Well-Known Symbols are "Public" symbols, which have a bit set which causes
-// them to produce an undefined value when a load results in a failed access
-// check. Because this behaviour is not specified properly as of yet, it only
-// applies to a subset of spec-defined Well-Known Symbols.
-#define WELL_KNOWN_SYMBOL_LIST(V)                           \
-  V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
-  V(to_string_tag_symbol, Symbol.toStringTag)
 
 // Heap roots that are known to be immortal immovable, for which we can safely
 // skip write barriers. This list is not complete and has omissions.
@@ -443,7 +251,6 @@
   V(OrderedHashTableMap)                \
   V(EmptyFixedArray)                    \
   V(EmptyByteArray)                     \
-  V(EmptyBytecodeArray)                 \
   V(EmptyDescriptorArray)               \
   V(ArgumentsMarker)                    \
   V(SymbolMap)                          \
@@ -468,6 +275,7 @@
   PRIVATE_SYMBOL_LIST(V)
 
 // Forward declarations.
+class AllocationObserver;
 class ArrayBufferTracker;
 class GCIdleTimeAction;
 class GCIdleTimeHandler;
@@ -483,6 +291,7 @@
 class ScavengeJob;
 class WeakObjectRetainer;
 
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by its size to avoid dereferencing a map pointer for scanning.
@@ -630,15 +439,17 @@
     kSmiRootsStart = kStringTableRootIndex + 1
   };
 
+  enum FindMementoMode { kForRuntime, kForGC };
+
+  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+
   // Indicates whether live bytes adjustment is triggered
   // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
   // - or from within GC (CONCURRENT_TO_SWEEPER),
   // - or mutator code (CONCURRENT_TO_SWEEPER).
   enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
 
-  enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
-
-  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+  enum UpdateAllocationSiteMode { kGlobal, kCached };
 
   // Taking this lock prevents the GC from entering a phase that relocates
   // object references.
@@ -709,20 +520,6 @@
   static const double kMaxHeapGrowingFactorIdle;
   static const double kTargetMutatorUtilization;
 
-  // Sloppy mode arguments object size.
-  static const int kSloppyArgumentsObjectSize =
-      JSObject::kHeaderSize + 2 * kPointerSize;
-
-  // Strict mode arguments has no callee so it is smaller.
-  static const int kStrictArgumentsObjectSize =
-      JSObject::kHeaderSize + 1 * kPointerSize;
-
-  // Indicies for direct access into argument objects.
-  static const int kArgumentsLengthIndex = 0;
-
-  // callee is only valid in sloppy mode.
-  static const int kArgumentsCalleeIndex = 1;
-
   static const int kNoGCFlags = 0;
   static const int kReduceMemoryFootprintMask = 1;
   static const int kAbortIncrementalMarkingMask = 2;
@@ -860,20 +657,6 @@
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed(bool dependant_context);
 
-  inline void increment_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_++;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  inline void decrement_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_--;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
   void set_native_contexts_list(Object* object) {
     native_contexts_list_ = object;
   }
@@ -927,6 +710,7 @@
 
   // If an object has an AllocationMemento trailing it, return it; otherwise
   // return NULL.
+  template <FindMementoMode mode>
   inline AllocationMemento* FindAllocationMemento(HeapObject* object);
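
The FindMementoMode template parameter bakes the caller's intent into the type, so each instantiation compiles down to just the checks that path needs. A minimal standalone sketch of this compile-time dispatch pattern, with purely illustrative names (this is not the V8 implementation):

#include <cstdio>

enum Mode { kForRuntime, kForGC };

// The mode is a compile-time constant; the dead branch is eliminated in each
// instantiation.
template <Mode mode>
const char* Find(bool map_is_valid) {
  if (mode == kForGC) {
    // The GC variant tolerates stale state and skips validation.
    return "unchecked-result";
  }
  // The runtime variant must validate before handing the result out.
  return map_is_valid ? "checked-result" : nullptr;
}

int main() {
  std::printf("%s\n", Find<kForGC>(false));
  const char* r = Find<kForRuntime>(true);
  std::printf("%s\n", r ? r : "null");
  return 0;
}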
 
   // Returns false if not able to reserve.
@@ -972,7 +756,6 @@
   inline bool OldGenerationAllocationLimitReached();
 
   void QueueMemoryChunkForFree(MemoryChunk* chunk);
-  void FilterStoreBufferEntriesOnAboutToBeFreedPages();
   void FreeQueuedChunks(MemoryChunk* list_head);
   void FreeQueuedChunks();
   void WaitUntilUnmappingOfFreeChunksCompleted();
@@ -1039,7 +822,7 @@
   bool HasHighFragmentation(intptr_t used, intptr_t committed);
 
   void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
-  void SetOptimizeForMemoryUsage() { optimize_for_memory_usage_ = true; }
+  void SetOptimizeForMemoryUsage();
   bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
 
   // ===========================================================================
@@ -1074,7 +857,6 @@
   // address with the mask will result in the start address of the new space
   // for all addresses in either semispace.
   Address NewSpaceStart() { return new_space_.start(); }
-  uintptr_t NewSpaceMask() { return new_space_.mask(); }
   Address NewSpaceTop() { return new_space_.top(); }
 
   NewSpace* new_space() { return &new_space_; }
@@ -1270,16 +1052,16 @@
   // Store buffer API. =========================================================
   // ===========================================================================
 
-  // Write barrier support for address[offset] = o.
-  INLINE(void RecordWrite(Address address, int offset));
-
-  // Write barrier support for address[start : start + len[ = o.
-  INLINE(void RecordWrites(Address address, int start, int len));
+  // Write barrier support for object[offset] = o;
+  inline void RecordWrite(Object* object, int offset, Object* o);
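
As a rough model of what the new object-based signature is for (a sketch under assumptions, not the V8 implementation): a generational write barrier only needs to remember old-to-new stores, so a scavenge can later update exactly those slots instead of scanning all of old space.

#include <unordered_set>

struct Obj {
  bool in_new_space;
  Obj* fields[4];
};

// Stand-in for the OLD_TO_NEW remembered set.
static std::unordered_set<Obj**> remembered_set;

void RecordWrite(Obj* object, int offset, Obj* value) {
  object->fields[offset] = value;
  // Only pointers from old space into new space need a remembered-set entry.
  if (value != nullptr && !object->in_new_space && value->in_new_space) {
    remembered_set.insert(&object->fields[offset]);
  }
}

int main() {
  Obj old_obj{false, {}};
  Obj young{true, {}};
  RecordWrite(&old_obj, 0, &young);    // old -> new: recorded
  RecordWrite(&old_obj, 1, &old_obj);  // old -> old: not recorded
  return remembered_set.size() == 1 ? 0 : 1;
}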
 
   Address* store_buffer_top_address() {
     return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
   }
 
+  void ClearRecordedSlot(HeapObject* object, Object** slot);
+  void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
+
   // ===========================================================================
   // Incremental marking API. ==================================================
   // ===========================================================================
@@ -1318,25 +1100,27 @@
 
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
-  inline bool InNewSpace(Address address);
-  inline bool InNewSpacePage(Address address);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
   // Returns whether the object resides in old space.
-  inline bool InOldSpace(Address address);
   inline bool InOldSpace(Object* object);
 
   // Checks whether an address/object is in the heap (including the auxiliary
   // area and unused area).
-  bool Contains(Address addr);
   bool Contains(HeapObject* value);
 
   // Checks whether an address/object is in a space.
   // Currently used by tests, serialization and heap verification only.
-  bool InSpace(Address addr, AllocationSpace space);
   bool InSpace(HeapObject* value, AllocationSpace space);
 
+  // Slow methods that can be used for verification as they can also be used
+  // with off-heap Addresses.
+  bool ContainsSlow(Address addr);
+  bool InSpaceSlow(Address addr, AllocationSpace space);
+  inline bool InNewSpaceSlow(Address address);
+  inline bool InOldSpaceSlow(Address address);
+
   // ===========================================================================
   // Object statistics tracking. ===============================================
   // ===========================================================================
@@ -1371,7 +1155,6 @@
   int MaxSemiSpaceSize() { return max_semi_space_size_; }
   int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  int TargetSemiSpaceSize() { return target_semispace_size_; }
   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
   intptr_t MaxExecutableSize() { return max_executable_size_; }
 
@@ -1408,13 +1191,13 @@
 
   void UpdateSurvivalStatistics(int start_new_space_size);
 
-  inline void IncrementPromotedObjectsSize(int object_size) {
+  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     promoted_objects_size_ += object_size;
   }
   inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
 
-  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
     semi_space_copied_object_size_ += object_size;
   }
@@ -1432,8 +1215,8 @@
 
   inline void IncrementNodesPromoted() { nodes_promoted_++; }
 
-  inline void IncrementYoungSurvivorsCounter(int survived) {
-    DCHECK(survived >= 0);
+  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+    DCHECK_GE(survived, 0);
     survived_last_scavenge_ = survived;
     survived_since_last_expansion_ += survived;
   }
@@ -1547,6 +1330,7 @@
   // the corresponding allocation site is immediately updated and an entry
   // in the hash map is created. Otherwise the entry (including the count
   // value) is cached on the local pretenuring feedback.
+  template <UpdateAllocationSiteMode mode>
   inline void UpdateAllocationSite(HeapObject* object,
                                    HashMap* pretenuring_feedback);
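
A minimal sketch of the kCached strategy described above, with hypothetical types (the real code keys a HashMap by AllocationSite): each parallel task counts mementos in a task-local map and the totals are merged into the global feedback sequentially, so the hot path needs no synchronization.

#include <unordered_map>

using SiteId = int;
using Feedback = std::unordered_map<SiteId, int>;

// Hot path, run by an evacuating task: no locks, just a local counter.
void UpdateAllocationSiteCached(Feedback& local, SiteId site) { ++local[site]; }

// Sequential merge on the main thread once the task has finished.
void MergeFeedback(Feedback& global, const Feedback& local) {
  for (const auto& entry : local) global[entry.first] += entry.second;
}

int main() {
  Feedback global, local;
  UpdateAllocationSiteCached(local, 7);
  UpdateAllocationSiteCached(local, 7);
  MergeFeedback(global, local);
  return global[7] == 2 ? 0 : 1;
}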
 
@@ -1580,6 +1364,9 @@
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
 #endif
+#ifdef ENABLE_SLOW_DCHECKS
+  int CountHandlesForObject(Object* object);
+#endif
 
  private:
   class PretenuringScope;
@@ -1684,9 +1471,6 @@
   static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
       Heap* heap, Object** pointer);
 
-  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
-                                          StoreBufferEvent event);
-
   // Selects the proper allocation space based on the pretenuring decision.
   static AllocationSpace SelectSpace(PretenureFlag pretenure) {
     return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
@@ -2007,6 +1791,9 @@
 
   MUST_USE_RESULT AllocationResult CopyCode(Code* code);
 
+  MUST_USE_RESULT AllocationResult
+  CopyBytecodeArray(BytecodeArray* bytecode_array);
+
   // Allocates a fixed array initialized with undefined values
   MUST_USE_RESULT AllocationResult
   AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
@@ -2084,6 +1871,11 @@
   MUST_USE_RESULT AllocationResult
   CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
 
+  // Make a copy of the first new_len entries of src and return the copy.
+  MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
+                                                      int new_len,
+                                                      PretenureFlag pretenure);
+
   // Make a copy of src, set the map, and return the copy.
   MUST_USE_RESULT AllocationResult
       CopyFixedArrayWithMap(FixedArray* src, Map* map);
@@ -2182,10 +1974,10 @@
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  int survived_since_last_expansion_;
+  intptr_t survived_since_last_expansion_;
 
   // ... and since the last scavenge.
-  int survived_last_scavenge_;
+  intptr_t survived_last_scavenge_;
 
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2201,8 +1993,6 @@
 
   int global_ic_age_;
 
-  int scan_on_scavenge_pages_;
-
   NewSpace new_space_;
   OldSpace* old_space_;
   OldSpace* code_space_;
@@ -2270,8 +2060,6 @@
 
   Object* encountered_transition_arrays_;
 
-  StoreBufferRebuilder store_buffer_rebuilder_;
-
   List<GCCallbackPair> gc_epilogue_callbacks_;
   List<GCCallbackPair> gc_prologue_callbacks_;
 
@@ -2339,7 +2127,7 @@
 
   ScavengeJob* scavenge_job_;
 
-  InlineAllocationObserver* idle_scavenge_observer_;
+  AllocationObserver* idle_scavenge_observer_;
 
   // These two counters are monotonically increasing and never reset.
   size_t full_codegen_bytes_generated_;
@@ -2696,16 +2484,7 @@
     }
   }
 
-  static int Hash(Object* source, Name* name) {
-    // Uses only lower 32 bits if pointers are larger.
-    uint32_t source_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
-        kPointerSizeLog2;
-    uint32_t name_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
-        kPointerSizeLog2;
-    return (source_hash ^ name_hash) % kLength;
-  }
+  static inline int Hash(Object* source, Name* name);
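
For reference, the body that moved out of this header hashes the two pointers by shifting away their alignment bits and XORing them. Restated standalone below, assuming 8-byte pointers (kPointerSizeLog2 == 3):

#include <cstdint>

static const int kLength = 64;

int Hash(const void* source, const void* name) {
  // Uses only the lower 32 bits if pointers are larger.
  uint32_t source_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >> 3;
  uint32_t name_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 3;
  return (source_hash ^ name_hash) % kLength;
}

int main() {
  int a = 0, b = 0;
  return Hash(&a, &b) < kLength ? 0 : 1;
}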
 
   static const int kLength = 64;
   struct Key {
@@ -2790,6 +2569,61 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG
+
+// -----------------------------------------------------------------------------
+// Allows observation of allocations.
+class AllocationObserver {
+ public:
+  explicit AllocationObserver(intptr_t step_size)
+      : step_size_(step_size), bytes_to_next_step_(step_size) {
+    DCHECK(step_size >= kPointerSize);
+  }
+  virtual ~AllocationObserver() {}
+
+  // Called each time the observed space does an allocation step. This may
+  // happen more frequently than the step_size we are monitoring (e.g. when
+  // there are multiple observers, or when a page or space boundary is
+  // encountered).
+  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
+    bytes_to_next_step_ -= bytes_allocated;
+    if (bytes_to_next_step_ <= 0) {
+      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+           size);
+      step_size_ = GetNextStepSize();
+      bytes_to_next_step_ = step_size_;
+    }
+  }
+
+ protected:
+  intptr_t step_size() const { return step_size_; }
+  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+  // Pure virtual method provided by the subclasses that gets called when at
+  // least step_size bytes have been allocated. soon_object is the address
+  // just allocated (but not yet initialized); size is the size of the object
+  // as requested (i.e. without the alignment fillers). Some complexities to
+  // be aware of:
+  // 1) soon_object will be nullptr in cases where we end up observing an
+  //    allocation that happens to be a filler space (e.g. page boundaries).
+  // 2) size is the requested size at the time of allocation. Right-trimming
+  //    may change the object size dynamically.
+  // 3) soon_object may actually be the first object in an allocation-folding
+  //    group. In such a case, size is the size of the group rather than that
+  //    of the first object.
+  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
+
+  // Subclasses can override this method to make step size dynamic.
+  virtual intptr_t GetNextStepSize() { return step_size_; }
+
+  intptr_t step_size_;
+  intptr_t bytes_to_next_step_;
+
+ private:
+  friend class LargeObjectSpace;
+  friend class NewSpace;
+  friend class PagedSpace;
+  DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
+};
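
A hypothetical subclass of the class above, to show the intended usage; it would be registered via NewSpace::AddAllocationObserver, just as the incremental-marking Observer is further down. The sampling interval and the bookkeeping are made up:

class SamplingObserver : public AllocationObserver {
 public:
  // Fire roughly every 64 KB of observed allocation.
  SamplingObserver() : AllocationObserver(64 * 1024) {}

 protected:
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    // soon_object is nullptr when the step landed on a filler, e.g. at a
    // page boundary; there is no object to sample in that case.
    if (soon_object == nullptr) return;
    samples_++;
  }

 private:
  int samples_ = 0;
};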
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 52d0ca4..ce6f6ee 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -12,6 +12,7 @@
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
 namespace v8 {
@@ -23,7 +24,6 @@
                      IncrementalMarking::DO_NOT_FORCE_COMPLETION);
 }
 
-
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
       observer_(*this, kAllocatedThreshold),
@@ -46,7 +46,6 @@
       incremental_marking_finalization_rounds_(0),
       request_type_(COMPLETE_MARKING) {}
 
-
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
   MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
@@ -91,6 +90,16 @@
   marking->RecordWrite(obj, slot, *slot);
 }
 
+// static
+void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
+                                                        Object** slot,
+                                                        Isolate* isolate) {
+  DCHECK(host->IsJSFunction());
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  Code* value = Code::cast(
+      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
+  marking->RecordWriteOfCodeEntry(host, slot, value);
+}
 
 void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                                HeapObject* value) {
@@ -128,8 +137,7 @@
                                                  Object* value) {
   if (BaseRecordWrite(obj, value)) {
       // Object is not going to be rescanned.  We need to record the slot.
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
-                                                       Code::cast(value));
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
   }
 }
 
@@ -366,7 +374,6 @@
   } else {
     chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
   }
-  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
 }
 
 
@@ -437,7 +444,16 @@
 
 
 bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc) consider setting this to some low level so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+  // Don't switch on for very small heaps.
   return CanBeActivated() &&
+         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
          heap_->HeapIsFullEnoughToStartIncrementalMarking(
              heap_->old_generation_allocation_limit());
 }
@@ -447,21 +463,12 @@
 
 
 bool IncrementalMarking::CanBeActivated() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
   // Only start incremental marking in a safe state: 1) when incremental
   // marking is turned on, 2) when we are currently not in a GC, and
   // 3) when we are currently not serializing or deserializing the heap.
-  // Don't switch on for very small heaps.
   return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
          heap_->deserialization_complete() &&
-         !heap_->isolate()->serializer_enabled() &&
-         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+         !heap_->isolate()->serializer_enabled();
 }
 
 
@@ -528,6 +535,7 @@
 
   HistogramTimerScope incremental_marking_scope(
       heap_->isolate()->counters()->gc_incremental_marking_start());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
   ResetStepCounters();
 
   was_activated_ = true;
@@ -541,7 +549,7 @@
     state_ = SWEEPING;
   }
 
-  heap_->new_space()->AddInlineAllocationObserver(&observer_);
+  heap_->new_space()->AddAllocationObserver(&observer_);
 
   incremental_marking_job()->Start(heap_);
 }
@@ -787,8 +795,14 @@
     HeapObject* obj = array[current];
     DCHECK(obj->IsHeapObject());
     current = ((current + 1) & mask);
-    if (heap_->InNewSpace(obj)) {
+    // Only pointers to from space have to be updated.
+    if (heap_->InFromSpace(obj)) {
       MapWord map_word = obj->map_word();
+      // There may be objects on the marking deque that do not exist anymore,
+      // e.g. left-trimmed objects or objects from the root set (frames).
+      // If these objects are dead at scavenging time, their marking deque
+      // entries will not point to forwarding addresses. Hence, we can discard
+      // them.
       if (map_word.IsForwardingAddress()) {
         HeapObject* dest = map_word.ToForwardingAddress();
         array[new_top] = dest;
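
The loop above is, at its core, an in-place filter over the deque: keep an entry only if the scavenge left a forwarding address behind, and rewrite it to the new location. A standalone reduction of that idea with simplified types and without the circular indexing:

#include <cstddef>
#include <vector>

struct HeapObj {
  HeapObj* forwarding;  // set by the scavenger iff the object survived
};

// Returns the new number of live deque entries.
std::size_t UpdateDeque(std::vector<HeapObj*>& deque) {
  std::size_t new_top = 0;
  for (HeapObj* obj : deque) {
    if (obj->forwarding != nullptr) {
      deque[new_top++] = obj->forwarding;  // survivor: redirect the entry
    }  // otherwise the entry is stale (e.g. left-trimmed) and is dropped
  }
  deque.resize(new_top);
  return new_top;
}

int main() {
  HeapObj target{nullptr};
  HeapObj moved{&target};  // survived and was evacuated to target
  HeapObj dead{nullptr};   // did not survive the scavenge
  std::vector<HeapObj*> deque = {&moved, &dead};
  return UpdateDeque(deque) == 1 ? 0 : 1;
}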
@@ -847,16 +861,21 @@
 
 intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
   intptr_t bytes_processed = 0;
-  Map* filler_map = heap_->one_pointer_filler_map();
+  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
+  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
   MarkingDeque* marking_deque =
       heap_->mark_compact_collector()->marking_deque();
   while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
     HeapObject* obj = marking_deque->Pop();
 
-    // Explicitly skip one word fillers. Incremental markbit patterns are
-    // correct only for objects that occupy at least two words.
+    // Explicitly skip one- and two-word fillers. Incremental markbit patterns
+    // are correct only for objects that occupy at least two words.
+    // Moreover, slots filtering for left-trimmed arrays works only when
+    // the distance between the old array start and the new array start
+    // is greater than two words if both starts are marked.
     Map* map = obj->map();
-    if (map == filler_map) continue;
+    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
+      continue;
 
     int size = obj->SizeFromMap(map);
     unscanned_bytes_of_large_object_ = 0;
@@ -939,23 +958,13 @@
     PrintF("[IncrementalMarking] Stopping.\n");
   }
 
-  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
+  heap_->new_space()->RemoveAllocationObserver(&observer_);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   if (IsMarking()) {
     PatchIncrementalMarkingRecordWriteStubs(heap_,
                                             RecordWriteStub::STORE_BUFFER_ONLY);
     DeactivateIncrementalWriteBarrier();
-
-    if (is_compacting_) {
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
   }
   heap_->isolate()->stack_guard()->ClearGC();
   state_ = STOPPED;
@@ -965,17 +974,7 @@
 
 void IncrementalMarking::Finalize() {
   Hurry();
-  state_ = STOPPED;
-  is_compacting_ = false;
-
-  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
-  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
-  PatchIncrementalMarkingRecordWriteStubs(heap_,
-                                          RecordWriteStub::STORE_BUFFER_ONLY);
-  DeactivateIncrementalWriteBarrier();
-  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
-  heap_->isolate()->stack_guard()->ClearGC();
+  Stop();
 }
 
 
@@ -1159,6 +1158,7 @@
   {
     HistogramTimerScope incremental_marking_scope(
         heap_->isolate()->counters()->gc_incremental_marking());
+    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
     double start = heap_->MonotonicallyIncreasingTimeInMs();
 
     // The marking speed is driven either by the allocation rate or by the rate
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index be63021..387dd0c 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -7,6 +7,7 @@
 
 #include "src/cancelable-task.h"
 #include "src/execution.h"
+#include "src/heap/heap.h"
 #include "src/heap/incremental-marking-job.h"
 #include "src/heap/spaces.h"
 #include "src/objects.h"
@@ -153,6 +154,9 @@
   static void RecordWriteFromCode(HeapObject* obj, Object** slot,
                                   Isolate* isolate);
 
+  static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
+                                             Isolate* isolate);
+
   // Record a slot for compaction.  Returns false for objects that are
   // guaranteed to be rescanned or not guaranteed to survive.
   //
@@ -215,10 +219,10 @@
   }
 
  private:
-  class Observer : public InlineAllocationObserver {
+  class Observer : public AllocationObserver {
    public:
     Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
-        : InlineAllocationObserver(step_size),
+        : AllocationObserver(step_size),
           incremental_marking_(incremental_marking) {}
 
     void Step(int bytes_allocated, Address, size_t) override {
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index a59d36b..f117ace 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -12,6 +12,17 @@
 namespace v8 {
 namespace internal {
 
+inline std::vector<Page*>& MarkCompactCollector::sweeping_list(Space* space) {
+  if (space == heap()->old_space()) {
+    return sweeping_list_old_space_;
+  } else if (space == heap()->code_space()) {
+    return sweeping_list_code_space_;
+  }
+  DCHECK_EQ(space, heap()->map_space());
+  return sweeping_list_map_space_;
+}
+
+
 void MarkCompactCollector::PushBlack(HeapObject* obj) {
   DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
   if (marking_deque_.Push(obj)) {
@@ -83,7 +94,7 @@
 
 
 void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
-  if (GetNextCandidate(shared_info) == NULL) {
+  if (GetNextCandidate(shared_info) == nullptr) {
     SetNextCandidate(shared_info, shared_function_info_candidates_head_);
     shared_function_info_candidates_head_ = shared_info;
   }
@@ -92,7 +103,7 @@
 
 void CodeFlusher::AddCandidate(JSFunction* function) {
   DCHECK(function->code() == function->shared()->code());
-  if (GetNextCandidate(function)->IsUndefined()) {
+  if (function->next_function_link()->IsUndefined()) {
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
   }
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 65bfdd9..646e634 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -19,13 +19,14 @@
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/object-stats.h"
-#include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
 #include "src/heap/slots-buffer.h"
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/profiler/cpu-profiler.h"
+#include "src/utils-inl.h"
 #include "src/v8.h"
 
 namespace v8 {
@@ -314,15 +315,13 @@
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
-    heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
   }
 
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
-    int number_of_pages = evacuation_candidates_.length();
-    for (int i = 0; i < number_of_pages; i++) {
-      Page* p = evacuation_candidates_[i];
+    for (Page* p : evacuation_candidates_) {
       SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
     }
   }
@@ -345,7 +344,7 @@
 
 
 void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
-  heap()->store_buffer()->VerifyValidStoreBufferEntries();
+  RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
 
   VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
   VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
@@ -478,45 +477,32 @@
 }
 
 
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
-  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
-      : CancelableTask(heap->isolate()), spaces_(spaces) {}
-
-  virtual ~CompactionTask() {}
-
- private:
-  // v8::internal::CancelableTask overrides.
-  void RunInternal() override {
-    MarkCompactCollector* mark_compact =
-        isolate()->heap()->mark_compact_collector();
-    SlotsBuffer* evacuation_slots_buffer = nullptr;
-    mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
-    mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
-    mark_compact->pending_compaction_tasks_semaphore_.Signal();
-  }
-
-  CompactionSpaceCollection* spaces_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
-
 class MarkCompactCollector::SweeperTask : public v8::Task {
  public:
-  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
+  SweeperTask(Heap* heap, AllocationSpace space_to_start)
+      : heap_(heap), space_to_start_(space_to_start) {}
 
   virtual ~SweeperTask() {}
 
  private:
   // v8::Task overrides.
   void Run() override {
-    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
+    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
+    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
+    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+    for (int i = 0; i < num_spaces; i++) {
+      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
+      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+      DCHECK_LE(space_id, LAST_PAGED_SPACE);
+      heap_->mark_compact_collector()->SweepInParallel(
+          heap_->paged_space(space_id), 0);
+    }
     heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
   }
 
   Heap* heap_;
-  PagedSpace* space_;
+  AllocationSpace space_to_start_;
 
   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
 };
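
The offset/modulo arithmetic in Run() staggers the tasks so that each one starts sweeping a different space before wrapping around to the rest. A standalone illustration with stand-in space ids:

#include <cstdio>

int main() {
  const int kFirst = 0, kLast = 2;  // stand-ins for FIRST/LAST_PAGED_SPACE
  const char* names[] = {"OLD_SPACE", "CODE_SPACE", "MAP_SPACE"};
  const int num_spaces = kLast - kFirst + 1;
  for (int start = kFirst; start <= kLast; start++) {
    const int offset = start - kFirst;
    std::printf("task %d:", start);
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = kFirst + ((i + offset) % num_spaces);
      std::printf(" %s", names[space_id]);
    }
    std::printf("\n");  // task 0: OLD CODE MAP; task 1: CODE MAP OLD; ...
  }
  return 0;
}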
@@ -527,22 +513,19 @@
   DCHECK(free_list_code_space_.get()->IsEmpty());
   DCHECK(free_list_map_space_.get()->IsEmpty());
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->old_space()),
-      v8::Platform::kShortRunningTask);
+      new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->code_space()),
-      v8::Platform::kShortRunningTask);
+      new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->map_space()),
-      v8::Platform::kShortRunningTask);
+      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
 }
 
 
 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
   PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-  if (!page->SweepingCompleted()) {
+  if (!page->SweepingDone()) {
     SweepInParallel(page, owner);
-    if (!page->SweepingCompleted()) {
+    if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
       // thread to be done with this page.
@@ -721,14 +704,14 @@
       continue;
     }
     // Invariant: Evacuation candidates are just created when marking is
-    // started. At the end of a GC all evacuation candidates are cleared and
-    // their slot buffers are released.
+    // started. This means that sweeping has finished. Furthermore, at the end
+    // of a GC all evacuation candidates are cleared and their slot buffers are
+    // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK(p->slots_buffer() == NULL);
+    CHECK(p->slots_buffer() == nullptr);
+    CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
-    int live_bytes =
-        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
-    pages.push_back(std::make_pair(live_bytes, p));
+    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
   }
 
   int candidate_count = 0;
@@ -831,9 +814,7 @@
 
 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
-    int npages = evacuation_candidates_.length();
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
+    for (Page* p : evacuation_candidates_) {
       slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
       p->ClearEvacuationCandidate();
       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
@@ -1224,9 +1205,6 @@
   }
 
  private:
-  template <int id>
-  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
-
   // Code flushing support.
 
   static const int kRegExpCodeThreshold = 5;
@@ -1551,8 +1529,13 @@
 class MarkCompactCollector::EvacuateVisitorBase
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
-      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+  EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
+                      SlotsBuffer** evacuation_slots_buffer,
+                      LocalStoreBuffer* local_store_buffer)
+      : heap_(heap),
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        compaction_spaces_(compaction_spaces),
+        local_store_buffer_(local_store_buffer) {}
 
   bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                          HeapObject** target_object) {
@@ -1562,7 +1545,7 @@
     if (allocation.To(target_object)) {
       heap_->mark_compact_collector()->MigrateObject(
           *target_object, object, size, target_space->identity(),
-          evacuation_slots_buffer_);
+          evacuation_slots_buffer_, local_store_buffer_);
       return true;
     }
     return false;
@@ -1571,6 +1554,8 @@
  protected:
   Heap* heap_;
   SlotsBuffer** evacuation_slots_buffer_;
+  CompactionSpaceCollection* compaction_spaces_;
+  LocalStoreBuffer* local_store_buffer_;
 };
 
 
@@ -1581,9 +1566,12 @@
   static const intptr_t kMaxLabObjectSize = 256;
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
+                                   CompactionSpaceCollection* compaction_spaces,
                                    SlotsBuffer** evacuation_slots_buffer,
+                                   LocalStoreBuffer* local_store_buffer,
                                    HashMap* local_pretenuring_feedback)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
+                            local_store_buffer),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
         promoted_size_(0),
@@ -1591,11 +1579,13 @@
         local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
   bool Visit(HeapObject* object) override {
-    heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+    heap_->UpdateAllocationSite<Heap::kCached>(object,
+                                               local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
     if (heap_->ShouldBePromoted(object->address(), size) &&
-        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+        TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+                          &target_object)) {
       // If we end up needing more special cases, we should factor this out.
       if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
         heap_->array_buffer_tracker()->Promote(
@@ -1608,7 +1598,8 @@
     AllocationSpace space = AllocateTargetObject(object, &target);
     heap_->mark_compact_collector()->MigrateObject(
         HeapObject::cast(target), object, size, space,
-        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
+        (space == NEW_SPACE) ? nullptr : local_store_buffer_);
     if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
       heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
     }
@@ -1681,7 +1672,8 @@
   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                              AllocationAlignment alignment) {
     AllocationResult allocation =
-        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+        compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
+                                                        alignment);
     if (allocation.IsRetry()) {
       FatalProcessOutOfMemory(
           "MarkCompactCollector: semi-space copy, fallback in old gen\n");
@@ -1727,9 +1719,10 @@
  public:
   EvacuateOldSpaceVisitor(Heap* heap,
                           CompactionSpaceCollection* compaction_spaces,
-                          SlotsBuffer** evacuation_slots_buffer)
-      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
-        compaction_spaces_(compaction_spaces) {}
+                          SlotsBuffer** evacuation_slots_buffer,
+                          LocalStoreBuffer* local_store_buffer)
+      : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
+                            local_store_buffer) {}
 
   bool Visit(HeapObject* object) override {
     CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1741,9 +1734,6 @@
     }
     return false;
   }
-
- private:
-  CompactionSpaceCollection* compaction_spaces_;
 };
 
 
@@ -2551,16 +2541,17 @@
   heap()->set_encountered_transition_arrays(Smi::FromInt(0));
 }
 
-
 void MarkCompactCollector::RecordMigratedSlot(
-    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
+    LocalStoreBuffer* local_store_buffer) {
   // When parallel compaction is in progress, store and slots buffer entries
   // require synchronization.
   if (heap_->InNewSpace(value)) {
     if (compaction_in_progress_) {
-      heap_->store_buffer()->MarkSynchronized(slot);
+      local_store_buffer->Record(slot);
     } else {
-      heap_->store_buffer()->Mark(slot);
+      Page* page = Page::FromAddress(slot);
+      RememberedSet<OLD_TO_NEW>::Insert(page, slot);
     }
   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
     SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
@@ -2640,19 +2631,23 @@
 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
   RecordMigratedSlotVisitor(MarkCompactCollector* collector,
-                            SlotsBuffer** evacuation_slots_buffer)
+                            SlotsBuffer** evacuation_slots_buffer,
+                            LocalStoreBuffer* local_store_buffer)
       : collector_(collector),
-        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+        evacuation_slots_buffer_(evacuation_slots_buffer),
+        local_store_buffer_(local_store_buffer) {}
 
   V8_INLINE void VisitPointer(Object** p) override {
     collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
-                                   evacuation_slots_buffer_);
+                                   evacuation_slots_buffer_,
+                                   local_store_buffer_);
   }
 
   V8_INLINE void VisitPointers(Object** start, Object** end) override {
     while (start < end) {
       collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
-                                     evacuation_slots_buffer_);
+                                     evacuation_slots_buffer_,
+                                     local_store_buffer_);
       ++start;
     }
   }
@@ -2668,6 +2663,7 @@
  private:
   MarkCompactCollector* collector_;
   SlotsBuffer** evacuation_slots_buffer_;
+  LocalStoreBuffer* local_store_buffer_;
 };
 
 
@@ -2685,9 +2681,10 @@
 // pointer iteration.  This is an issue if the store buffer overflows and we
 // have to scan the entire old space, including dead objects, looking for
 // pointers to new space.
-void MarkCompactCollector::MigrateObject(
-    HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
-    SlotsBuffer** evacuation_slots_buffer) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+                                         int size, AllocationSpace dest,
+                                         SlotsBuffer** evacuation_slots_buffer,
+                                         LocalStoreBuffer* local_store_buffer) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
   DCHECK(heap()->AllowedToBeMigrated(src, dest));
@@ -2698,7 +2695,8 @@
     DCHECK(IsAligned(size, kPointerSize));
 
     heap()->MoveBlock(dst->address(), src->address(), size);
-    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
+                                      local_store_buffer);
     dst->IterateBody(&visitor);
   } else if (dest == CODE_SPACE) {
     DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
@@ -2884,11 +2882,12 @@
 
 static void UpdatePointer(HeapObject** address, HeapObject* object) {
   MapWord map_word = object->map_word();
-  // The store buffer can still contain stale pointers in dead large objects.
-  // Ignore these pointers here.
+  // Since we only filter invalid slots in old space, the store buffer can
+  // still contain stale pointers in the large object and map spaces. Ignore
+  // these pointers here.
   DCHECK(map_word.IsForwardingAddress() ||
-         object->GetHeap()->lo_space()->FindPage(
-             reinterpret_cast<Address>(address)) != NULL);
+         !object->GetHeap()->old_space()->Contains(
+             reinterpret_cast<Address>(address)));
   if (map_word.IsForwardingAddress()) {
     // Update the corresponding slot.
     *address = map_word.ToForwardingAddress();
@@ -3060,54 +3059,18 @@
 
 
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
-  // There are soft limits in the allocation code, designed trigger a mark
-  // sweep collection by failing allocations.  But since we are already in
-  // a mark-sweep allocation, there is no sense in trying to trigger one.
-  AlwaysAllocateScope scope(isolate());
-
   NewSpace* new_space = heap()->new_space();
-
-  // Store allocation range before flipping semispaces.
-  Address from_bottom = new_space->bottom();
-  Address from_top = new_space->top();
-
-  // Flip the semispaces.  After flipping, to space is empty, from space has
-  // live objects.
-  new_space->Flip();
-  new_space->ResetAllocationInfo();
-
-  newspace_evacuation_candidates_.Clear();
-  NewSpacePageIterator it(from_bottom, from_top);
+  NewSpacePageIterator it(new_space->bottom(), new_space->top());
+  // Append the list of new space pages to be processed.
   while (it.has_next()) {
     newspace_evacuation_candidates_.Add(it.next());
   }
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
 }
 
-
-HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
-  HashMap* local_pretenuring_feedback = new HashMap(
-      HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
-  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
-                                            local_pretenuring_feedback);
-  // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.  This stage puts
-  // new entries in the store buffer and may cause some pages to be marked
-  // scan-on-scavenge.
-  for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
-    NewSpacePage* p =
-        reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
-    bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
-    USE(ok);
-    DCHECK(ok);
-  }
-  heap_->IncrementPromotedObjectsSize(
-      static_cast<int>(new_space_visitor.promoted_size()));
-  heap_->IncrementSemiSpaceCopiedObjectSize(
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
-  heap_->IncrementYoungSurvivorsCounter(
-      static_cast<int>(new_space_visitor.promoted_size()) +
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
-  return local_pretenuring_feedback;
+void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
+  newspace_evacuation_candidates_.Rewind(0);
 }
 
 
@@ -3117,8 +3080,168 @@
   evacuation_slots_buffers_.Add(evacuation_slots_buffer);
 }
 
+class MarkCompactCollector::Evacuator : public Malloced {
+ public:
+  Evacuator(MarkCompactCollector* collector,
+            const List<Page*>& evacuation_candidates,
+            const List<NewSpacePage*>& newspace_evacuation_candidates)
+      : collector_(collector),
+        evacuation_candidates_(evacuation_candidates),
+        newspace_evacuation_candidates_(newspace_evacuation_candidates),
+        compaction_spaces_(collector->heap()),
+        local_slots_buffer_(nullptr),
+        local_store_buffer_(collector->heap()),
+        local_pretenuring_feedback_(HashMap::PointersMatch,
+                                    kInitialLocalPretenuringFeedbackCapacity),
+        new_space_visitor_(collector->heap(), &compaction_spaces_,
+                           &local_slots_buffer_, &local_store_buffer_,
+                           &local_pretenuring_feedback_),
+        old_space_visitor_(collector->heap(), &compaction_spaces_,
+                           &local_slots_buffer_, &local_store_buffer_),
+        duration_(0.0),
+        bytes_compacted_(0),
+        task_id_(0) {}
 
-int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+  // Evacuate the configured set of pages in parallel.
+  inline void EvacuatePages();
+
+  // Merge back locally cached info sequentially. Note that this method needs
+  // to be called from the main thread.
+  inline void Finalize();
+
+  CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
+
+  uint32_t task_id() { return task_id_; }
+  void set_task_id(uint32_t id) { task_id_ = id; }
+
+ private:
+  static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+
+  Heap* heap() { return collector_->heap(); }
+
+  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+    duration_ += duration;
+    bytes_compacted_ += bytes_compacted;
+  }
+
+  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+
+  MarkCompactCollector* collector_;
+
+  // Pages to process.
+  const List<Page*>& evacuation_candidates_;
+  const List<NewSpacePage*>& newspace_evacuation_candidates_;
+
+  // Locally cached collector data.
+  CompactionSpaceCollection compaction_spaces_;
+  SlotsBuffer* local_slots_buffer_;
+  LocalStoreBuffer local_store_buffer_;
+  HashMap local_pretenuring_feedback_;
+
+  // Visitors for the corresponding spaces.
+  EvacuateNewSpaceVisitor new_space_visitor_;
+  EvacuateOldSpaceVisitor old_space_visitor_;
+
+  // Bookkeeping info.
+  double duration_;
+  intptr_t bytes_compacted_;
+
+  // Task id, if this evacuator is executed on a background task instead of
+  // the main thread. Can be used to try to abort the task currently
+  // scheduled to evacuate pages.
+  uint32_t task_id_;
+};
+
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
+    MemoryChunk* p, HeapObjectVisitor* visitor) {
+  bool success = true;
+  if (p->parallel_compaction_state().TrySetValue(
+          MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+    if (p->IsEvacuationCandidate() || p->InNewSpace()) {
+      DCHECK_EQ(p->parallel_compaction_state().Value(),
+                MemoryChunk::kCompactingInProgress);
+      int saved_live_bytes = p->LiveBytes();
+      double evacuation_time;
+      {
+        AlwaysAllocateScope always_allocate(heap()->isolate());
+        TimedScope timed_scope(&evacuation_time);
+        success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+      }
+      if (success) {
+        ReportCompactionProgress(evacuation_time, saved_live_bytes);
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingFinalize);
+      } else {
+        p->parallel_compaction_state().SetValue(
+            MemoryChunk::kCompactingAborted);
+      }
+    } else {
+      // There could be popular pages in the list of evacuation candidates
+      // which we do not compact.
+      p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+    }
+  }
+  return success;
+}
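
The TrySetValue call above is the claiming step of the parallel protocol: whichever evacuator wins the kCompactingDone -> kCompactingInProgress transition owns the page. A simplified model with a plain atomic and illustrative state names:

#include <atomic>

enum State { kDone, kInProgress, kFinalize, kAborted };

bool TryClaimPage(std::atomic<State>& page_state) {
  State expected = kDone;
  // Succeeds for exactly one racing evacuator; the others skip the page.
  return page_state.compare_exchange_strong(expected, kInProgress);
}

int main() {
  std::atomic<State> state{kDone};
  bool first = TryClaimPage(state);   // true: this caller owns the page
  bool second = TryClaimPage(state);  // false: already claimed
  return (first && !second) ? 0 : 1;
}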
+
+void MarkCompactCollector::Evacuator::EvacuatePages() {
+  for (NewSpacePage* p : newspace_evacuation_candidates_) {
+    DCHECK(p->InNewSpace());
+    DCHECK_EQ(p->concurrent_sweeping_state().Value(),
+              NewSpacePage::kSweepingDone);
+    bool success = EvacuateSinglePage(p, &new_space_visitor_);
+    DCHECK(success);
+    USE(success);
+  }
+  for (Page* p : evacuation_candidates_) {
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
+    DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+    EvacuateSinglePage(p, &old_space_visitor_);
+  }
+}
+
+void MarkCompactCollector::Evacuator::Finalize() {
+  heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
+  heap()->code_space()->MergeCompactionSpace(
+      compaction_spaces_.Get(CODE_SPACE));
+  heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
+  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+  heap()->IncrementSemiSpaceCopiedObjectSize(
+      new_space_visitor_.semispace_copied_size());
+  heap()->IncrementYoungSurvivorsCounter(
+      new_space_visitor_.promoted_size() +
+      new_space_visitor_.semispace_copied_size());
+  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+  local_store_buffer_.Process(heap()->store_buffer());
+  collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
+}
+
+class MarkCompactCollector::CompactionTask : public CancelableTask {
+ public:
+  explicit CompactionTask(Heap* heap, Evacuator* evacuator)
+      : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
+    evacuator->set_task_id(id());
+  }
+
+  virtual ~CompactionTask() {}
+
+ private:
+  // v8::internal::CancelableTask overrides.
+  void RunInternal() override {
+    evacuator_->EvacuatePages();
+    heap_->mark_compact_collector()
+        ->pending_compaction_tasks_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  Evacuator* evacuator_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
+
+int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
+                                                          intptr_t live_bytes) {
   if (!FLAG_parallel_compaction) return 1;
   // Compute the number of needed tasks based on a target compaction time, the
   // profiled compaction speed and marked live memory.
@@ -3126,83 +3249,75 @@
   // The number of parallel compaction tasks is limited by:
   // - #evacuation pages
   // - (#cores - 1)
-  // - a hard limit
   const double kTargetCompactionTimeInMs = 1;
-  const int kMaxCompactionTasks = 8;
+  const int kNumSweepingTasks = 3;
 
   intptr_t compaction_speed =
       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
-  if (compaction_speed == 0) return 1;
 
-  intptr_t live_bytes = 0;
-  for (Page* page : evacuation_candidates_) {
-    live_bytes += page->LiveBytes();
+  const int available_cores =
+      Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+  int tasks;
+  if (compaction_speed > 0) {
+    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
+                                 compaction_speed / kTargetCompactionTimeInMs);
+  } else {
+    tasks = pages;
   }
-
-  const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
-  const int tasks =
-      1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
-                           kTargetCompactionTimeInMs);
-  const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
-  const int tasks_capped_cores = Min(cores, tasks_capped_pages);
-  const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
-  return tasks_capped_hard;
+  const int tasks_capped_pages = Min(pages, tasks);
+  return Min(available_cores, tasks_capped_pages);
 }
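
A worked instance of the heuristic above with made-up inputs, showing how the caps interact (8 MB of marked live bytes, a profiled speed of 2 MB/ms, 10 candidate pages, 8 processors):

#include <algorithm>
#include <cstdio>

int main() {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;
  const int pages = 10;
  const long long live_bytes = 8LL * 1024 * 1024;
  const long long compaction_speed = 2LL * 1024 * 1024;  // bytes per ms
  const int processors = 8;

  const int available_cores = std::max(1, processors - kNumSweepingTasks - 1);
  int tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                   compaction_speed / kTargetCompactionTimeInMs);
  tasks = std::min(pages, tasks);  // 5 tasks, well under the 10-page cap
  std::printf("%d tasks\n", std::min(available_cores, tasks));  // "4 tasks"
  return 0;
}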
 
 
 void MarkCompactCollector::EvacuatePagesInParallel() {
-  const int num_pages = evacuation_candidates_.length();
-  if (num_pages == 0) return;
+  int num_pages = 0;
+  intptr_t live_bytes = 0;
+  for (Page* page : evacuation_candidates_) {
+    num_pages++;
+    live_bytes += page->LiveBytes();
+  }
+  for (NewSpacePage* page : newspace_evacuation_candidates_) {
+    num_pages++;
+    live_bytes += page->LiveBytes();
+  }
+  DCHECK_GE(num_pages, 1);
 
   // Used for trace summary.
-  intptr_t live_bytes = 0;
   intptr_t compaction_speed = 0;
   if (FLAG_trace_fragmentation) {
-    for (Page* page : evacuation_candidates_) {
-      live_bytes += page->LiveBytes();
-    }
     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
-  const int num_tasks = NumberOfParallelCompactionTasks();
+
+  const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
 
   // Set up compaction spaces.
-  CompactionSpaceCollection** compaction_spaces_for_tasks =
-      new CompactionSpaceCollection*[num_tasks];
+  Evacuator** evacuators = new Evacuator*[num_tasks];
   for (int i = 0; i < num_tasks; i++) {
-    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+    evacuators[i] = new Evacuator(this, evacuation_candidates_,
+                                  newspace_evacuation_candidates_);
   }
 
-  heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
-                                                  num_tasks);
-  heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
-                                                   num_tasks);
-
-  uint32_t* task_ids = new uint32_t[num_tasks - 1];
   // Kick off parallel tasks.
-  StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+  StartParallelCompaction(evacuators, num_tasks);
   // Wait for unfinished and not-yet-started tasks.
-  WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
-  delete[] task_ids;
+  WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
 
-  double compaction_duration = 0.0;
-  intptr_t compacted_memory = 0;
-  // Merge back memory (compacted and unused) from compaction spaces.
+  // Finalize local evacuators by merging back all locally cached data.
   for (int i = 0; i < num_tasks; i++) {
-    heap()->old_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
-    heap()->code_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
-    compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
-    compaction_duration += compaction_spaces_for_tasks[i]->duration();
-    delete compaction_spaces_for_tasks[i];
+    evacuators[i]->Finalize();
+    delete evacuators[i];
   }
-  delete[] compaction_spaces_for_tasks;
-  heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
+  delete[] evacuators;
 
-  // Finalize sequentially.
+  // Finalize pages sequentially.
+  for (NewSpacePage* p : newspace_evacuation_candidates_) {
+    DCHECK_EQ(p->parallel_compaction_state().Value(),
+              MemoryChunk::kCompactingFinalize);
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+
   int abandoned_pages = 0;
-  for (int i = 0; i < num_pages; i++) {
-    Page* p = evacuation_candidates_[i];
+  for (Page* p : evacuation_candidates_) {
     switch (p->parallel_compaction_state().Value()) {
       case MemoryChunk::ParallelCompactingState::kCompactingAborted:
         // We have partially compacted the page, i.e., some objects may have
@@ -3222,12 +3337,11 @@
         //   entries of such pages are filtered before rescanning.
         DCHECK(p->IsEvacuationCandidate());
         p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        p->set_scan_on_scavenge(true);
         abandoned_pages++;
         break;
       case MemoryChunk::kCompactingFinalize:
         DCHECK(p->IsEvacuationCandidate());
-        p->SetWasSwept();
+        DCHECK(p->SweepingDone());
         p->Unlink();
         break;
       case MemoryChunk::kCompactingDone:
@@ -3235,7 +3349,7 @@
         DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
         break;
       default:
-        // We should not observe kCompactingInProgress, or kCompactingDone.
+        // Only MemoryChunk::kCompactingInProgress remains, which must not
+        // be observed here.
         UNREACHABLE();
     }
     p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
@@ -3252,31 +3366,28 @@
   }
 }
 
-
-void MarkCompactCollector::StartParallelCompaction(
-    CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
-    int len) {
+void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
+                                                   int len) {
   compaction_in_progress_ = true;
   for (int i = 1; i < len; i++) {
-    CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
-    task_ids[i - 1] = task->id();
+    CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         task, v8::Platform::kShortRunningTask);
   }
 
-  // Contribute in main thread.
-  EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+  // Contribute on main thread.
+  evacuators[0]->EvacuatePages();
 }
 
-
-void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
                                                         int len) {
   // Try to cancel compaction tasks that have not been run (as they might be
   // stuck in a worker queue). Tasks that cannot be canceled have either
   // already completed or are still running, so we need to wait for their
   // semaphore signal.
   for (int i = 0; i < len; i++) {
-    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
+            evacuators[i]->task_id())) {
       pending_compaction_tasks_semaphore_.Wait();
     }
   }
@@ -3284,45 +3395,6 @@
 }
 
 
-void MarkCompactCollector::EvacuatePages(
-    CompactionSpaceCollection* compaction_spaces,
-    SlotsBuffer** evacuation_slots_buffer) {
-  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
-                                  evacuation_slots_buffer);
-  for (int i = 0; i < evacuation_candidates_.length(); i++) {
-    Page* p = evacuation_candidates_[i];
-    DCHECK(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
-           MemoryChunk::kSweepingDone);
-    if (p->parallel_compaction_state().TrySetValue(
-            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-      if (p->IsEvacuationCandidate()) {
-        DCHECK_EQ(p->parallel_compaction_state().Value(),
-                  MemoryChunk::kCompactingInProgress);
-        double start = heap()->MonotonicallyIncreasingTimeInMs();
-        intptr_t live_bytes = p->LiveBytes();
-        AlwaysAllocateScope always_allocate(isolate());
-        if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
-          p->ResetLiveBytes();
-          p->parallel_compaction_state().SetValue(
-              MemoryChunk::kCompactingFinalize);
-          compaction_spaces->ReportCompactionProgress(
-              heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
-        } else {
-          p->parallel_compaction_state().SetValue(
-              MemoryChunk::kCompactingAborted);
-        }
-      } else {
-        // There could be popular pages in the list of evacuation candidates
-        // which we do compact.
-        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-      }
-    }
-  }
-}
-
-
 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
@@ -3369,7 +3441,7 @@
           FreeSpaceTreatmentMode free_space_mode>
 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
                  ObjectVisitor* v) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3432,14 +3504,7 @@
     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }
-
-  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
-  } else {
-    p->SetWasSwept();
-  }
+  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
 
@@ -3472,9 +3537,7 @@
                                              Address end_slot) {
   // Remove entries by replacing them with an old-space slot containing a smi
   // that is located in an unmovable page.
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
+  for (Page* p : evacuation_candidates_) {
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     if (p->IsEvacuationCandidate()) {
@@ -3512,6 +3575,10 @@
         page->markbits()->ClearRange(
             page->AddressToMarkbitIndex(page->area_start()),
             page->AddressToMarkbitIndex(object->address()));
+        if (page->old_to_new_slots() != nullptr) {
+          page->old_to_new_slots()->RemoveRange(
+              0, static_cast<int>(object->address() - page->address()));
+        }
         RecomputeLiveBytes(page);
       }
       return false;
@@ -3554,10 +3621,10 @@
 
 void MarkCompactCollector::SweepAbortedPages() {
   // Second pass on aborted pages.
-  for (int i = 0; i < evacuation_candidates_.length(); i++) {
-    Page* p = evacuation_candidates_[i];
+  for (Page* p : evacuation_candidates_) {
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
       switch (space->identity()) {
         case OLD_SPACE:
@@ -3586,30 +3653,25 @@
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
 
-  HashMap* local_pretenuring_feedback = nullptr;
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
     EvacuationScope evacuation_scope(this);
+
     EvacuateNewSpacePrologue();
-    local_pretenuring_feedback = EvacuateNewSpaceInParallel();
-    heap_->new_space()->set_age_mark(heap_->new_space()->top());
-  }
-
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_CANDIDATES);
-    EvacuationScope evacuation_scope(this);
     EvacuatePagesInParallel();
-  }
-
-  {
-    heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
-    delete local_pretenuring_feedback;
+    EvacuateNewSpaceEpilogue();
+    heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
 
+  // Give pages that are queued to be freed back to the OS. Note that filtering
+  // slots only handles old space (for unboxed doubles), and thus map space can
+  // still contain stale pointers. We only free the chunks after pointer updates
+  // to still have access to page headers.
+  heap()->FreeQueuedChunks();
+
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -3677,18 +3739,14 @@
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
 
-    StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
-                                  &Heap::ScavengeStoreBufferCallback);
-    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
   }
 
-  int npages = evacuation_candidates_.length();
   {
     GCTracer::Scope gc_scope(
         heap()->tracer(),
         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
+    for (Page* p : evacuation_candidates_) {
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
 
@@ -3720,6 +3778,7 @@
         }
         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+        p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
 
         switch (space->identity()) {
           case OLD_SPACE:
@@ -3761,51 +3820,38 @@
 }
 
 
-void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    if (!p->IsEvacuationCandidate()) continue;
-    p->Unlink();
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    p->InsertAfter(space->LastPage());
-  }
-}
-
-
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
+  for (Page* p : evacuation_candidates_) {
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
-    p->set_scan_on_scavenge(false);
     p->ResetLiveBytes();
-    CHECK(p->WasSwept());
-    space->ReleasePage(p);
+    CHECK(p->SweepingDone());
+    space->ReleasePage(p, true);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
-  heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
   heap()->FreeQueuedChunks();
 }
 
 
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
-                                          int required_freed_bytes) {
+                                          int required_freed_bytes,
+                                          int max_pages) {
   int max_freed = 0;
   int max_freed_overall = 0;
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  int page_count = 0;
+  for (Page* p : sweeping_list(space)) {
     max_freed = SweepInParallel(p, space);
     DCHECK(max_freed >= 0);
     if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
       return max_freed;
     }
     max_freed_overall = Max(max_freed, max_freed_overall);
-    if (p == space->end_of_unswept_pages()) break;
+    page_count++;
+    if (max_pages > 0 && page_count >= max_pages) {
+      break;
+    }
   }
   return max_freed_overall;
 }
@@ -3813,14 +3859,13 @@
 
 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
   int max_freed = 0;
-  if (page->TryLock()) {
+  if (page->mutex()->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
-    if (page->parallel_sweeping_state().Value() !=
-        MemoryChunk::kSweepingPending) {
+    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
       page->mutex()->Unlock();
       return 0;
     }
-    page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     FreeList* free_list;
     FreeList private_free_list(space);
     if (space->identity() == OLD_SPACE) {
@@ -3840,6 +3885,7 @@
                 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     }
     free_list->Concatenate(&private_free_list);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
   }
   return max_freed;
@@ -3849,22 +3895,14 @@
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearStats();
 
-  // We defensively initialize end_of_unswept_pages_ here with the first page
-  // of the pages list.
-  space->set_end_of_unswept_pages(space->FirstPage());
-
   PageIterator it(space);
 
-  int pages_swept = 0;
+  int will_be_swept = 0;
   bool unused_page_present = false;
-  bool parallel_sweeping_active = false;
 
   while (it.has_next()) {
     Page* p = it.next();
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-
-    // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearWasSwept();
+    DCHECK(p->SweepingDone());
 
     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -3878,6 +3916,7 @@
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
      // testing, this is fine.
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
             IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
       continue;
@@ -3889,45 +3928,25 @@
         if (FLAG_gc_verbose) {
           PrintIsolate(isolate(), "sweeping: released page: %p", p);
         }
-        space->ReleasePage(p);
+        space->ReleasePage(p, false);
         continue;
       }
       unused_page_present = true;
     }
 
-    if (!parallel_sweeping_active) {
-      if (FLAG_gc_verbose) {
-        PrintIsolate(isolate(), "sweeping: %p", p);
-      }
-      if (space->identity() == CODE_SPACE) {
-        if (FLAG_zap_code_space) {
-          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                ZAP_FREE_SPACE>(space, NULL, p, NULL);
-        } else {
-          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-        }
-      } else {
-        Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-              IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-      }
-      pages_swept++;
-      parallel_sweeping_active = true;
-    } else {
-      if (FLAG_gc_verbose) {
-        PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
-      }
-      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
-      int to_sweep = p->area_size() - p->LiveBytes();
-      space->accounting_stats_.ShrinkSpace(to_sweep);
-    }
-    space->set_end_of_unswept_pages(p);
+    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+    sweeping_list(space).push_back(p);
+    int to_sweep = p->area_size() - p->LiveBytes();
+    space->accounting_stats_.ShrinkSpace(to_sweep);
+    will_be_swept++;
   }
 
   if (FLAG_gc_verbose) {
-    PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
-                 AllocationSpaceName(space->identity()), pages_swept);
+    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+                 AllocationSpaceName(space->identity()), will_be_swept);
   }
+  std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
+            [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
 }
 
 
@@ -3942,8 +3961,6 @@
   state_ = SWEEP_SPACES;
 #endif
 
-  MoveEvacuationCandidatesToEndOfPagesList();
-
   {
     sweeping_in_progress_ = true;
     {
@@ -3969,10 +3986,6 @@
   // Deallocate unmarked large objects.
   heap_->lo_space()->FreeUnmarkedObjects();
 
-  // Give pages that are queued to be freed back to the OS. Invalid store
-  // buffer entries are already filter out. We can just release the memory.
-  heap()->FreeQueuedChunks();
-
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                      start_time);
@@ -3980,24 +3993,10 @@
 }
 
 
-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (p->parallel_sweeping_state().Value() ==
-        MemoryChunk::kSweepingFinalize) {
-      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
-      p->SetWasSwept();
-    }
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-  }
-}
-
-
 void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  ParallelSweepSpaceComplete(heap()->old_space());
-  ParallelSweepSpaceComplete(heap()->code_space());
-  ParallelSweepSpaceComplete(heap()->map_space());
+  sweeping_list(heap()->old_space()).clear();
+  sweeping_list(heap()->code_space()).clear();
+  sweeping_list(heap()->map_space()).clear();
 }
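
The sweeping rework in this file replaces the per-space "end of unswept
pages" cursor with explicit per-space sweeping lists: StartSweepSpace marks
each page kSweepingPending, queues it, and sorts the queue by live bytes so
sweeper tasks pick up the emptiest (cheapest and most profitable) pages
first. A minimal sketch of that scheduling idea; the Page struct here is a
hypothetical stand-in for V8's Page class:

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-in for v8::internal::Page; only the fields the
    // scheduling decision needs.
    struct Page {
      int live_bytes;  // bytes still live after marking
      int area_size;   // usable bytes on the page
    };

    // Queue pages for concurrent sweeping, emptiest pages first, and return
    // how many bytes the space can expect to get back.
    int QueueForSweeping(std::vector<Page*>& pages,
                         std::vector<Page*>& sweeping_list) {
      int reclaimable = 0;
      for (Page* p : pages) {
        sweeping_list.push_back(p);
        reclaimable += p->area_size - p->live_bytes;  // ShrinkSpace analogue
      }
      // Pages with fewer live bytes are cheaper to sweep and free more
      // memory, so sweeper tasks should pick them up first.
      std::sort(sweeping_list.begin(), sweeping_list.end(),
                [](Page* a, Page* b) { return a->live_bytes < b->live_bytes; });
      return reclaimable;
    }

Sorting ascending by live bytes is what lets SweepInParallel satisfy a
required_freed_bytes request while touching as few pages as possible.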
 
 
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index cfb2d9d..cc5449f 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -7,6 +7,7 @@
 
 #include "src/base/bits.h"
 #include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
 
 namespace v8 {
 namespace internal {
@@ -406,7 +407,8 @@
 
   void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                      AllocationSpace to_old_space,
-                     SlotsBuffer** evacuation_slots_buffer);
+                     SlotsBuffer** evacuation_slots_buffer,
+                     LocalStoreBuffer* local_store_buffer);
 
   void InvalidateCode(Code* code);
 
@@ -421,7 +423,8 @@
   // required_freed_bytes was freed. If required_freed_bytes was set to zero
   // then the whole given space is swept. It returns the size of the largest
   // contiguous freed memory chunk.
-  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes,
+                      int max_pages = 0);
 
   // Sweeps a given page concurrently with the sweeper threads. It returns
   // the size of the largest contiguous freed memory chunk.
@@ -508,10 +511,11 @@
   class EvacuateNewSpaceVisitor;
   class EvacuateOldSpaceVisitor;
   class EvacuateVisitorBase;
+  class Evacuator;
   class HeapObjectVisitor;
   class SweeperTask;
 
-  static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+  typedef std::vector<Page*> SweepingList;
 
   explicit MarkCompactCollector(Heap* heap);
 
@@ -693,31 +697,26 @@
   //          evacuation.
   //
 
+  inline SweepingList& sweeping_list(Space* space);
+
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
   void SweepSpaces();
 
   void EvacuateNewSpacePrologue();
-
-  // Returns local pretenuring feedback.
-  HashMap* EvacuateNewSpaceInParallel();
+  void EvacuateNewSpaceEpilogue();
 
   void AddEvacuationSlotsBufferSynchronized(
       SlotsBuffer* evacuation_slots_buffer);
 
-  void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
-                     SlotsBuffer** evacuation_slots_buffer);
-
   void EvacuatePagesInParallel();
 
   // The number of parallel compaction tasks, including the main thread.
-  int NumberOfParallelCompactionTasks();
+  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
 
-
-  void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
-                               uint32_t* task_ids, int len);
-  void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
+  void StartParallelCompaction(Evacuator** evacuators, int len);
+  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
 
   void EvacuateNewSpaceAndCandidates();
 
@@ -736,10 +735,6 @@
 
   void ReleaseEvacuationCandidates();
 
-  // Moves the pages of the evacuation_candidates_ list to the end of their
-  // corresponding space pages list.
-  void MoveEvacuationCandidatesToEndOfPagesList();
-
   // Starts sweeping of a space by contributing on the main thread and setting
   // up other pages for sweeping.
   void StartSweepSpace(PagedSpace* space);
@@ -748,11 +743,10 @@
   // swept in parallel.
   void ParallelSweepSpacesComplete();
 
-  void ParallelSweepSpaceComplete(PagedSpace* space);
-
   // Updates store buffer and slot buffer for a pointer in a migrating object.
   void RecordMigratedSlot(Object* value, Address slot,
-                          SlotsBuffer** evacuation_slots_buffer);
+                          SlotsBuffer** evacuation_slots_buffer,
+                          LocalStoreBuffer* local_store_buffer);
 
   // Adds the code entry slot to the slots buffer.
   void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
@@ -778,8 +772,7 @@
   bool have_code_to_deoptimize_;
 
   List<Page*> evacuation_candidates_;
-
-  List<MemoryChunk*> newspace_evacuation_candidates_;
+  List<NewSpacePage*> newspace_evacuation_candidates_;
 
   // The evacuation_slots_buffers_ are used by the compaction threads.
   // When a compaction task finishes, it uses
@@ -793,6 +786,10 @@
   base::SmartPointer<FreeList> free_list_code_space_;
   base::SmartPointer<FreeList> free_list_map_space_;
 
+  SweepingList sweeping_list_old_space_;
+  SweepingList sweeping_list_code_space_;
+  SweepingList sweeping_list_map_space_;
+
   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
   bool compacting_;
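
The header changes above consolidate per-task state (compaction spaces,
slots buffer, task id) into an Evacuator object, so StartParallelCompaction
and WaitUntilCompactionCompleted take a single Evacuator** instead of
parallel arrays. A rough model of the fork/contribute/join pattern, using
std::thread in place of V8's platform tasks and cancelable task manager:

    #include <thread>
    #include <vector>

    struct Evacuator {
      void EvacuatePages() { /* claim and evacuate candidate pages */ }
    };

    // Start len-1 background workers and contribute with evacuators[0] on
    // the current thread, mirroring StartParallelCompaction above.
    void RunParallelCompaction(std::vector<Evacuator>& evacuators) {
      std::vector<std::thread> workers;
      for (size_t i = 1; i < evacuators.size(); i++) {
        workers.emplace_back([&evacuators, i] { evacuators[i].EvacuatePages(); });
      }
      evacuators[0].EvacuatePages();  // main-thread contribution
      // V8 instead tries CancelableTaskManager::TryAbort() first and waits
      // on a semaphore for tasks that already started; a plain join() is
      // the simple equivalent here.
      for (std::thread& t : workers) t.join();
    }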
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index ee10091..f537307 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -47,7 +47,7 @@
   event.should_start_incremental_gc = is_idle || optimize_for_memory;
   event.can_start_incremental_gc =
       heap->incremental_marking()->IsStopped() &&
-      heap->incremental_marking()->CanBeActivated();
+      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
   memory_reducer_->NotifyTimer(event);
 }
 
@@ -118,9 +118,8 @@
   }
 }
 
-
-void MemoryReducer::NotifyContextDisposed(const Event& event) {
-  DCHECK_EQ(kContextDisposed, event.type);
+void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
+  DCHECK_EQ(kPossibleGarbage, event.type);
   Action old_action = state_.action;
   state_ = Step(state_, event);
   if (old_action != kWait && state_.action == kWait) {
@@ -147,14 +146,14 @@
       if (event.type == kTimer) {
         return state;
       } else {
-        DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
+        DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
         return State(
             kWait, 0, event.time_ms + kLongDelayMs,
             event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
       }
     case kWait:
       switch (event.type) {
-        case kContextDisposed:
+        case kPossibleGarbage:
           return state;
         case kTimer:
           if (state.started_gcs >= kMaxNumberOfGCs) {
diff --git a/src/heap/memory-reducer.h b/src/heap/memory-reducer.h
index 9213613..0fe53e5 100644
--- a/src/heap/memory-reducer.h
+++ b/src/heap/memory-reducer.h
@@ -96,7 +96,7 @@
     double last_gc_time_ms;
   };
 
-  enum EventType { kTimer, kMarkCompact, kContextDisposed };
+  enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
 
   struct Event {
     EventType type;
@@ -113,7 +113,7 @@
         js_calls_sample_time_ms_(0.0) {}
   // Callbacks.
   void NotifyMarkCompact(const Event& event);
-  void NotifyContextDisposed(const Event& event);
+  void NotifyPossibleGarbage(const Event& event);
   void NotifyBackgroundIdleNotification(const Event& event);
   // The step function that computes the next state from the current state and
   // the incoming event.
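
Renaming kContextDisposed to kPossibleGarbage generalizes the trigger: any
embedder hint that garbage is likely can now nudge the reducer, not just a
disposed context. In the kDone state the step function treats
kPossibleGarbage and kMarkCompact identically and schedules a delayed GC; a
condensed, self-contained sketch of that transition (field names follow the
diff; kLongDelayMs is passed in as a parameter here):

    // Condensed model of MemoryReducer::Step for the kDone state, based on
    // the memory-reducer.cc hunk above.
    enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
    enum Action { kDone, kWait };

    struct State {
      Action action;
      int started_gcs;
      double next_gc_start_ms;
      double last_gc_time_ms;
    };

    State StepFromDone(const State& state, EventType type, double time_ms,
                       double long_delay_ms) {
      if (type == kTimer) return state;  // nothing to do while idle
      // kPossibleGarbage or kMarkCompact: arm a delayed GC.
      return State{kWait, 0, time_ms + long_delay_ms,
                   type == kMarkCompact ? time_ms : state.last_gc_time_ms};
    }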
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index a29ba4b..d71c879 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -115,7 +115,7 @@
   VisitPointers(
       map->GetHeap(), object,
       HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
-      HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+      HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
   return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
 }
 
@@ -531,7 +531,7 @@
   StaticVisitor::VisitPointers(
       map->GetHeap(), object,
       HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
-      HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+      HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
 }
 
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 315c897..0003a07 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -118,7 +118,6 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
-    case JS_ITERATOR_RESULT_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
new file mode 100644
index 0000000..d9d5914
--- /dev/null
+++ b/src/heap/remembered-set.cc
@@ -0,0 +1,69 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/remembered-set.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+template <PointerDirection direction>
+void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
+  STATIC_ASSERT(direction == OLD_TO_NEW);
+  PageIterator it(heap->old_space());
+  MemoryChunk* chunk;
+  while (it.has_next()) {
+    chunk = it.next();
+    SlotSet* slots = GetSlotSet(chunk);
+    if (slots != nullptr) {
+      slots->Iterate([heap](Address addr) {
+        Object** slot = reinterpret_cast<Object**>(addr);
+        return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
+                                       : SlotSet::REMOVE_SLOT;
+      });
+    }
+  }
+}
+
+template <PointerDirection direction>
+void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
+  STATIC_ASSERT(direction == OLD_TO_NEW);
+  Iterate(heap, [heap](Address addr) {
+    Object** slot = reinterpret_cast<Object**>(addr);
+    Object* object = *slot;
+    if (Page::FromAddress(addr)->owner() != nullptr &&
+        Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
+      CHECK(IsValidSlot(heap, slot));
+      heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+          reinterpret_cast<Address>(slot), HeapObject::cast(object));
+    }
+    return SlotSet::KEEP_SLOT;
+  });
+}
+
+template <PointerDirection direction>
+bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
+  STATIC_ASSERT(direction == OLD_TO_NEW);
+  Object* object = *slot;
+  if (!heap->InNewSpace(object)) {
+    return false;
+  }
+  HeapObject* heap_object = HeapObject::cast(object);
+  // If the target object is not black, the source slot must be part
+  // of a non-black (dead) object.
+  return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
+         heap->mark_compact_collector()->IsSlotInLiveObject(
+             reinterpret_cast<Address>(slot));
+}
+
+template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
+template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
+
+}  // namespace internal
+}  // namespace v8
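
IsValidSlot above keeps an old-to-new slot only if its target is a black
(marked) new-space object and the slot address itself still lies inside a
live object. A sketch of that predicate with the three heap queries
abstracted as template parameters, since Heap::InNewSpace, Marking::IsBlack,
and MarkCompactCollector::IsSlotInLiveObject cannot be reproduced outside
V8:

    // Sketch of the per-slot predicate behind IsValidSlot. The three
    // predicates are stand-ins for Heap::InNewSpace, Marking::IsBlack and
    // MarkCompactCollector::IsSlotInLiveObject.
    template <typename InNewSpace, typename IsBlack, typename InLiveObject>
    bool IsValidOldToNewSlot(void** slot, InNewSpace in_new_space,
                             IsBlack is_black, InLiveObject in_live_object) {
      void* target = *slot;
      // Keep the slot only if it still points at a live (black) new-space
      // object and the slot itself is part of a live object.
      return in_new_space(target) && is_black(target) && in_live_object(slot);
    }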
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
new file mode 100644
index 0000000..351d76e
--- /dev/null
+++ b/src/heap/remembered-set.h
@@ -0,0 +1,157 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REMEMBERED_SET_H
+#define V8_REMEMBERED_SET_H
+
+#include "src/heap/heap.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
+
+template <PointerDirection direction>
+class RememberedSet {
+ public:
+  // Given a page and a slot in that page, this function adds the slot to the
+  // remembered set.
+  static void Insert(Page* page, Address slot_addr) {
+    DCHECK(page->Contains(slot_addr));
+    SlotSet* slot_set = GetSlotSet(page);
+    if (slot_set == nullptr) {
+      slot_set = AllocateSlotSet(page);
+    }
+    uintptr_t offset = slot_addr - page->address();
+    slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
+  }
+
+  // Given a page and a slot in that page, this function removes the slot from
+  // the remembered set.
+  // If the slot was never added, then the function does nothing.
+  static void Remove(Page* page, Address slot_addr) {
+    DCHECK(page->Contains(slot_addr));
+    SlotSet* slot_set = GetSlotSet(page);
+    if (slot_set != nullptr) {
+      uintptr_t offset = slot_addr - page->address();
+      slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
+    }
+  }
+
+  // Given a page and a range of slots in that page, this function removes the
+  // slots from the remembered set.
+  static void RemoveRange(Page* page, Address start, Address end) {
+    SlotSet* slot_set = GetSlotSet(page);
+    if (slot_set != nullptr) {
+      uintptr_t start_offset = start - page->address();
+      uintptr_t end_offset = end - page->address();
+      DCHECK_LT(start_offset, end_offset);
+      DCHECK_LE(end_offset, static_cast<uintptr_t>(Page::kPageSize));
+      slot_set->RemoveRange(static_cast<uint32_t>(start_offset),
+                            static_cast<uint32_t>(end_offset));
+    }
+  }
+
+  // Iterates and filters the remembered set with the given callback.
+  // The callback should take (Address slot) and return SlotSet::CallbackResult.
+  template <typename Callback>
+  static void Iterate(Heap* heap, Callback callback) {
+    PointerChunkIterator it(heap);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != nullptr) {
+      SlotSet* slots = GetSlotSet(chunk);
+      if (slots != nullptr) {
+        size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+        int new_count = 0;
+        for (size_t page = 0; page < pages; page++) {
+          new_count += slots[page].Iterate(callback);
+        }
+        if (new_count == 0) {
+          ReleaseSlotSet(chunk);
+        }
+      }
+    }
+  }
+
+  // Iterates and filters the remembered set with the given callback.
+  // The callback should take (HeapObject** slot, HeapObject* target) and
+  // update the slot.
+  // A special wrapper takes care of filtering the slots based on their values.
+  // For the OLD_TO_NEW case: slots that do not point into to-space after
+  // the callback invocation are removed from the set.
+  template <typename Callback>
+  static void IterateWithWrapper(Heap* heap, Callback callback) {
+    Iterate(heap, [heap, callback](Address addr) {
+      return Wrapper(heap, addr, callback);
+    });
+  }
+
+  // Eliminates all stale slots from the remembered set, i.e.
+  // slots that are no longer part of live objects. This method must be
+  // called after marking, when the whole transitive closure is known, and
+  // before sweeping, while the mark bits are still intact.
+  static void ClearInvalidSlots(Heap* heap);
+
+  static void VerifyValidSlots(Heap* heap);
+
+ private:
+  static SlotSet* GetSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      return chunk->old_to_old_slots();
+    } else {
+      return chunk->old_to_new_slots();
+    }
+  }
+
+  static void ReleaseSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->ReleaseOldToOldSlots();
+    } else {
+      chunk->ReleaseOldToNewSlots();
+    }
+  }
+
+  static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->AllocateOldToOldSlots();
+      return chunk->old_to_old_slots();
+    } else {
+      chunk->AllocateOldToNewSlots();
+      return chunk->old_to_new_slots();
+    }
+  }
+
+  template <typename Callback>
+  static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
+                                         Callback slot_callback) {
+    STATIC_ASSERT(direction == OLD_TO_NEW);
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* object = *slot;
+    if (heap->InFromSpace(object)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+      DCHECK(heap_object->IsHeapObject());
+      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+      object = *slot;
+      // If the object was in from-space before the callback and is in
+      // to-space afterwards, then it is still live. Unfortunately, we know
+      // nothing about the slot itself; it could be inside a just-freed
+      // free-space object.
+      if (heap->InToSpace(object)) {
+        return SlotSet::KEEP_SLOT;
+      }
+    } else {
+      DCHECK(!heap->InNewSpace(object));
+    }
+    return SlotSet::REMOVE_SLOT;
+  }
+
+  static bool IsValidSlot(Heap* heap, Object** slot);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_REMEMBERED_SET_H
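
Taken together, this header gives the collector a typed facade over the
per-chunk slot sets. A hedged usage sketch of the two common paths; the
function names RecordOldToNewPointer and UpdateOldToNewPointers are
illustrative, not part of the patch:

    // Illustrative use of the RememberedSet facade defined above, assuming
    // a page and a slot address obtained elsewhere in the heap.
    void RecordOldToNewPointer(Page* page, Address slot_addr) {
      // Write barrier path: remember that an old-space slot points into
      // new space.
      RememberedSet<OLD_TO_NEW>::Insert(page, slot_addr);
    }

    void UpdateOldToNewPointers(Heap* heap) {
      // GC path: visit every remembered slot; the wrapper drops slots whose
      // value no longer points into to-space after the callback ran.
      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(
          heap, [](HeapObject** slot, HeapObject* target) {
            // e.g. update *slot to the object's new location.
          });
    }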
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
index cd35c7d..b8fd1c8 100644
--- a/src/heap/scavenger-inl.h
+++ b/src/heap/scavenger-inl.h
@@ -28,7 +28,7 @@
     return;
   }
 
-  object->GetHeap()->UpdateAllocationSite(
+  object->GetHeap()->UpdateAllocationSite<Heap::kGlobal>(
       object, object->GetHeap()->global_pretenuring_feedback_);
 
   // AllocationMementos are unrooted and shouldn't survive a scavenge
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
new file mode 100644
index 0000000..6144706
--- /dev/null
+++ b/src/heap/slot-set.h
@@ -0,0 +1,219 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SLOT_SET_H
+#define V8_SLOT_SET_H
+
+#include "src/allocation.h"
+#include "src/base/bits.h"
+
+namespace v8 {
+namespace internal {
+
+// Data structure for maintaining a set of slots in a standard (non-large)
+// page. The base address of the page must be set with SetPageStart before any
+// operation.
+// The data structure assumes that the slots are pointer-size-aligned and
+// splits the valid slot offset range into kBuckets buckets.
+// Each bucket is a bitmap with a bit corresponding to a single slot offset.
+class SlotSet : public Malloced {
+ public:
+  enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
+
+  SlotSet() {
+    for (int i = 0; i < kBuckets; i++) {
+      bucket[i] = nullptr;
+    }
+  }
+
+  ~SlotSet() {
+    for (int i = 0; i < kBuckets; i++) {
+      ReleaseBucket(i);
+    }
+  }
+
+  void SetPageStart(Address page_start) { page_start_ = page_start; }
+
+  // The slot offset specifies a slot at address page_start_ + slot_offset.
+  void Insert(int slot_offset) {
+    int bucket_index, cell_index, bit_index;
+    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+    if (bucket[bucket_index] == nullptr) {
+      bucket[bucket_index] = AllocateBucket();
+    }
+    bucket[bucket_index][cell_index] |= 1u << bit_index;
+  }
+
+  // The slot offset specifies a slot at address page_start_ + slot_offset.
+  void Remove(int slot_offset) {
+    int bucket_index, cell_index, bit_index;
+    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+    if (bucket[bucket_index] != nullptr) {
+      uint32_t cell = bucket[bucket_index][cell_index];
+      if (cell) {
+        uint32_t bit_mask = 1u << bit_index;
+        if (cell & bit_mask) {
+          bucket[bucket_index][cell_index] ^= bit_mask;
+        }
+      }
+    }
+  }
+
+  // The slot offsets specify a range of slots at addresses:
+  // [page_start_ + start_offset ... page_start_ + end_offset).
+  void RemoveRange(int start_offset, int end_offset) {
+    DCHECK_LE(start_offset, end_offset);
+    int start_bucket, start_cell, start_bit;
+    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
+    int end_bucket, end_cell, end_bit;
+    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
+    uint32_t start_mask = (1u << start_bit) - 1;
+    uint32_t end_mask = ~((1u << end_bit) - 1);
+    if (start_bucket == end_bucket && start_cell == end_cell) {
+      MaskCell(start_bucket, start_cell, start_mask | end_mask);
+      return;
+    }
+    int current_bucket = start_bucket;
+    int current_cell = start_cell;
+    MaskCell(current_bucket, current_cell, start_mask);
+    current_cell++;
+    if (current_bucket < end_bucket) {
+      if (bucket[current_bucket] != nullptr) {
+        while (current_cell < kCellsPerBucket) {
+          bucket[current_bucket][current_cell] = 0;
+          current_cell++;
+        }
+      }
+      // The rest of the current bucket is cleared.
+      // Move on to the next bucket.
+      current_bucket++;
+      current_cell = 0;
+    }
+    DCHECK(current_bucket == end_bucket ||
+           (current_bucket < end_bucket && current_cell == 0));
+    while (current_bucket < end_bucket) {
+      ReleaseBucket(current_bucket);
+      current_bucket++;
+    }
+    // All buckets between start_bucket and end_bucket are cleared.
+    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
+    if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
+      return;
+    }
+    while (current_cell < end_cell) {
+      bucket[current_bucket][current_cell] = 0;
+      current_cell++;
+    }
+    // All cells between start_cell and end_cell are cleared.
+    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
+    MaskCell(end_bucket, end_cell, end_mask);
+  }
+
+  // The slot offset specifies a slot at address page_start_ + slot_offset.
+  bool Lookup(int slot_offset) {
+    int bucket_index, cell_index, bit_index;
+    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+    if (bucket[bucket_index] != nullptr) {
+      uint32_t cell = bucket[bucket_index][cell_index];
+      return (cell & (1u << bit_index)) != 0;
+    }
+    return false;
+  }
+
+  // Iterate over all slots in the set and for each slot invoke the callback.
+  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
+  // Returns the new number of slots.
+  //
+  // Sample usage:
+  // Iterate([](Address slot_address) {
+  //    if (good(slot_address)) return KEEP_SLOT;
+  //    else return REMOVE_SLOT;
+  // });
+  template <typename Callback>
+  int Iterate(Callback callback) {
+    int new_count = 0;
+    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
+      if (bucket[bucket_index] != nullptr) {
+        int in_bucket_count = 0;
+        uint32_t* current_bucket = bucket[bucket_index];
+        int cell_offset = bucket_index * kBitsPerBucket;
+        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
+          if (current_bucket[i]) {
+            uint32_t cell = current_bucket[i];
+            uint32_t old_cell = cell;
+            uint32_t new_cell = cell;
+            while (cell) {
+              int bit_offset = base::bits::CountTrailingZeros32(cell);
+              uint32_t bit_mask = 1u << bit_offset;
+              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
+              if (callback(page_start_ + slot) == KEEP_SLOT) {
+                ++in_bucket_count;
+              } else {
+                new_cell ^= bit_mask;
+              }
+              cell ^= bit_mask;
+            }
+            if (old_cell != new_cell) {
+              current_bucket[i] = new_cell;
+            }
+          }
+        }
+        if (in_bucket_count == 0) {
+          ReleaseBucket(bucket_index);
+        }
+        new_count += in_bucket_count;
+      }
+    }
+    return new_count;
+  }
+
+ private:
+  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
+  static const int kCellsPerBucket = 32;
+  static const int kCellsPerBucketLog2 = 5;
+  static const int kBitsPerCell = 32;
+  static const int kBitsPerCellLog2 = 5;
+  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
+  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
+  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
+
+  uint32_t* AllocateBucket() {
+    uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
+    for (int i = 0; i < kCellsPerBucket; i++) {
+      result[i] = 0;
+    }
+    return result;
+  }
+
+  void ReleaseBucket(int bucket_index) {
+    DeleteArray<uint32_t>(bucket[bucket_index]);
+    bucket[bucket_index] = nullptr;
+  }
+
+  void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
+    uint32_t* cells = bucket[bucket_index];
+    if (cells != nullptr && cells[cell_index] != 0) {
+      cells[cell_index] &= mask;
+    }
+  }
+
+  // Converts the slot offset into bucket/cell/bit index.
+  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
+                     int* bit_index) {
+    DCHECK_EQ(slot_offset % kPointerSize, 0);
+    int slot = slot_offset >> kPointerSizeLog2;
+    DCHECK(slot >= 0 && slot <= kMaxSlots);
+    *bucket_index = slot >> kBitsPerBucketLog2;
+    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
+    *bit_index = slot & (kBitsPerCell - 1);
+  }
+
+  uint32_t* bucket[kBuckets];
+  Address page_start_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SLOT_SET_H
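
A worked example of the index arithmetic in SlotToIndices, assuming 64-bit
pointers (kPointerSize == 8, kPointerSizeLog2 == 3): slot offset 0x2a48 is
slot number 0x2a48 >> 3 = 1353, so bucket = 1353 >> 10 = 1, cell =
(1353 >> 5) & 31 = 10, bit = 1353 & 31 = 9. The same computation as a
standalone program:

    #include <cassert>

    // Standalone re-derivation of SlotSet::SlotToIndices; the constants
    // mirror the header above, assuming 64-bit pointers.
    constexpr int kPointerSizeLog2 = 3;
    constexpr int kBitsPerCellLog2 = 5;     // 32-bit cells
    constexpr int kCellsPerBucket = 32;
    constexpr int kBitsPerBucketLog2 = 10;  // 32 cells * 32 bits = 2^10 bits

    void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
      int slot = slot_offset >> kPointerSizeLog2;
      *bucket = slot >> kBitsPerBucketLog2;
      *cell = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
      *bit = slot & 31;
    }

    int main() {
      int bucket, cell, bit;
      SlotToIndices(0x2a48, &bucket, &cell, &bit);  // slot number 1353
      assert(bucket == 1 && cell == 10 && bit == 9);
      return 0;
    }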
diff --git a/src/heap/slots-buffer.cc b/src/heap/slots-buffer.cc
index 3f145e6..5a3db28 100644
--- a/src/heap/slots-buffer.cc
+++ b/src/heap/slots-buffer.cc
@@ -56,9 +56,12 @@
         // - point to a heap object in new space
         // - are not within a live heap object on a valid pointer slot
         // - point to a heap object not on an evacuation candidate
-        if (!object->IsHeapObject() || heap->InNewSpace(object) ||
+        // TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
+        //   we filter out unboxed double slots eagerly.
+        if (!object->IsHeapObject() ||
             !heap->mark_compact_collector()->IsSlotInLiveObject(
                 reinterpret_cast<Address>(slot)) ||
+            heap->InNewSpace(object) ||
             !Page::FromAddress(reinterpret_cast<Address>(object))
                  ->IsEvacuationCandidate()) {
           // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 3023fbf..515a202 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -178,6 +178,52 @@
 
 #endif
 
+// -----------------------------------------------------------------------------
+// SemiSpace
+
+bool SemiSpace::Contains(HeapObject* o) {
+  return id_ == kToSpace
+             ? MemoryChunk::FromAddress(o->address())->InToSpace()
+             : MemoryChunk::FromAddress(o->address())->InFromSpace();
+}
+
+bool SemiSpace::Contains(Object* o) {
+  return o->IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool SemiSpace::ContainsSlow(Address a) {
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    if (it.next() == MemoryChunk::FromAddress(a)) return true;
+  }
+  return false;
+}
+
+// --------------------------------------------------------------------------
+// NewSpace
+
+bool NewSpace::Contains(HeapObject* o) {
+  return MemoryChunk::FromAddress(o->address())->InNewSpace();
+}
+
+bool NewSpace::Contains(Object* o) {
+  return o->IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool NewSpace::ContainsSlow(Address a) {
+  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContainsSlow(Address a) {
+  return to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::FromSpaceContainsSlow(Address a) {
+  return from_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
 // --------------------------------------------------------------------------
 // AllocationResult
@@ -205,6 +251,36 @@
   return page;
 }
 
+void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
+  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
+}
+
+void MemoryChunk::ResetLiveBytes() {
+  if (FLAG_trace_live_bytes) {
+    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
+                 live_byte_count_);
+  }
+  live_byte_count_ = 0;
+}
+
+void MemoryChunk::IncrementLiveBytes(int by) {
+  if (FLAG_trace_live_bytes) {
+    PrintIsolate(heap()->isolate(),
+                 "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
+                 live_byte_count_, live_byte_count_ + by);
+  }
+  live_byte_count_ += by;
+  DCHECK_GE(live_byte_count_, 0);
+  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
+}
+
+void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
+    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
@@ -212,39 +288,24 @@
   return p->owner() == this;
 }
 
-
-bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
-
-
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
-  if (scan) {
-    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
-    SetFlag(SCAN_ON_SCAVENGE);
-  } else {
-    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
-    ClearFlag(SCAN_ON_SCAVENGE);
-  }
-  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+bool PagedSpace::Contains(Object* o) {
+  if (!o->IsHeapObject()) return false;
+  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
+  if (!p->is_valid()) return false;
+  return p->owner() == this;
 }
 
-
 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
-  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
-      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
-  if (maybe->owner() != NULL) return maybe;
-  LargeObjectIterator iterator(heap->lo_space());
-  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
-    // Fixed arrays are the only pointer-containing objects in large object
-    // space.
-    if (o->IsFixedArray()) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
-      if (chunk->Contains(addr)) {
-        return chunk;
-      }
-    }
+  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
+  uintptr_t offset = addr - chunk->address();
+  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
+    chunk = heap->lo_space()->FindPage(addr);
   }
-  UNREACHABLE();
-  return NULL;
+  return chunk;
+}
+
+Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
+  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
 }
 
 
@@ -425,12 +486,18 @@
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                          AllocationAlignment alignment) {
 #ifdef V8_HOST_ARCH_32_BIT
-  return alignment == kDoubleAligned
-             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
-             : AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result =
+      alignment == kDoubleAligned
+          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+          : AllocateRawUnaligned(size_in_bytes);
 #else
-  return AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
 #endif
+  HeapObject* heap_obj = nullptr;
+  if (!result.IsRetry() && result.To(&heap_obj)) {
+    AllocationStep(heap_obj->address(), size_in_bytes);
+  }
+  return result;
 }
 
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 90d252a..6b98fc1 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -7,6 +7,7 @@
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/heap/slot-set.h"
 #include "src/heap/slots-buffer.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
@@ -35,7 +36,7 @@
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly);
-  DCHECK(page->WasSwept() || page->SweepingCompleted());
+  DCHECK(page->SweepingDone());
 }
 
 
@@ -66,10 +67,24 @@
       cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  DCHECK(cur_page->SweepingDone());
   return true;
 }
 
+PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
+    : heap_(heap) {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->PauseAllocationObservers();
+  }
+}
+
+PauseAllocationObserversScope::~PauseAllocationObserversScope() {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->ResumeAllocationObservers();
+  }
+}
 
 // -----------------------------------------------------------------------------
 // CodeRange
@@ -427,8 +442,7 @@
 
   MemoryChunk* chunk =
       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
-                              area_end, NOT_EXECUTABLE, semi_space);
-  chunk->initialize_scan_on_scavenge(true);
+                              area_end, NOT_EXECUTABLE, semi_space, nullptr);
   bool in_to_space = (semi_space->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
@@ -449,10 +463,10 @@
   SetFlags(0, ~0);
 }
 
-
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
-                                     Executability executable, Space* owner) {
+                                     Executability executable, Space* owner,
+                                     base::VirtualMemory* reservation) {
   MemoryChunk* chunk = FromAddress(base);
 
   DCHECK(base == chunk->address());
@@ -464,23 +478,20 @@
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
-  chunk->slots_buffer_ = NULL;
-  chunk->skip_list_ = NULL;
+  chunk->slots_buffer_ = nullptr;
+  chunk->old_to_new_slots_ = nullptr;
+  chunk->old_to_old_slots_ = nullptr;
+  chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
-  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->parallel_compaction_state().SetValue(kCompactingDone);
-  chunk->mutex_ = NULL;
-  chunk->available_in_small_free_list_ = 0;
-  chunk->available_in_medium_free_list_ = 0;
-  chunk->available_in_large_free_list_ = 0;
-  chunk->available_in_huge_free_list_ = 0;
-  chunk->non_available_small_blocks_ = 0;
+  chunk->mutex_ = nullptr;
+  chunk->available_in_free_list_ = 0;
+  chunk->wasted_memory_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
-  chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
 
@@ -491,6 +502,10 @@
     chunk->SetFlag(IS_EXECUTABLE);
   }
 
+  if (reservation != nullptr) {
+    chunk->reservation_.TakeControl(reservation);
+  }
+
   return chunk;
 }
 
@@ -692,19 +707,14 @@
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
-  MemoryChunk* result = MemoryChunk::Initialize(
-      heap, base, chunk_size, area_start, area_end, executable, owner);
-  result->set_reserved_memory(&reservation);
-  return result;
+  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+                                 executable, owner, &reservation);
 }
 
 
 void Page::ResetFreeListStatistics() {
-  non_available_small_blocks_ = 0;
-  available_in_small_free_list_ = 0;
-  available_in_medium_free_list_ = 0;
-  available_in_large_free_list_ = 0;
-  available_in_huge_free_list_ = 0;
+  wasted_memory_ = 0;
+  available_in_free_list_ = 0;
 }
 
 
@@ -921,21 +931,46 @@
 // -----------------------------------------------------------------------------
 // MemoryChunk implementation
 
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
-  }
-  chunk->IncrementLiveBytes(by);
-}
-
-
 void MemoryChunk::ReleaseAllocatedMemory() {
   delete slots_buffer_;
+  slots_buffer_ = nullptr;
   delete skip_list_;
+  skip_list_ = nullptr;
   delete mutex_;
+  mutex_ = nullptr;
+  ReleaseOldToNewSlots();
+  ReleaseOldToOldSlots();
 }
 
+static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
+  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
+  DCHECK(pages > 0);
+  SlotSet* slot_set = new SlotSet[pages];
+  for (size_t i = 0; i < pages; i++) {
+    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
+  }
+  return slot_set;
+}
+
+void MemoryChunk::AllocateOldToNewSlots() {
+  DCHECK(nullptr == old_to_new_slots_);
+  old_to_new_slots_ = AllocateSlotSet(size_, address());
+}
+
+void MemoryChunk::ReleaseOldToNewSlots() {
+  delete[] old_to_new_slots_;
+  old_to_new_slots_ = nullptr;
+}
+
+void MemoryChunk::AllocateOldToOldSlots() {
+  DCHECK(nullptr == old_to_old_slots_);
+  old_to_old_slots_ = AllocateSlotSet(size_, address());
+}
+
+void MemoryChunk::ReleaseOldToOldSlots() {
+  delete[] old_to_old_slots_;
+  old_to_old_slots_ = nullptr;
+}
 
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -949,12 +984,18 @@
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);
 
+void Space::AllocationStep(Address soon_object, int size) {
+  if (!allocation_observers_paused_) {
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      AllocationObserver* o = (*allocation_observers_)[i];
+      o->AllocationStep(size, soon_object, size);
+    }
+  }
+}
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable),
-      free_list_(this),
-      end_of_unswept_pages_(NULL) {
+    : Space(heap, space, executable), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
@@ -987,52 +1028,6 @@
 }
 
 
-FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
-  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
-  if (free_space != nullptr) {
-    accounting_stats_.DecreaseCapacity(free_space->size());
-  }
-  return free_space;
-}
-
-
-void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
-                                            int num, intptr_t limit) {
-  DCHECK_GT(num, 0);
-  DCHECK(other != nullptr);
-
-  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
-  EmptyAllocationInfo();
-
-  bool memory_available = true;
-  bool spaces_need_memory = true;
-  FreeSpace* node = nullptr;
-  CompactionSpace* current_space = nullptr;
-  // Iterate over spaces and memory as long as we have memory and there are
-  // spaces in need of some.
-  while (memory_available && spaces_need_memory) {
-    spaces_need_memory = false;
-    // Round-robin over all spaces.
-    for (int i = 0; i < num; i++) {
-      current_space = other[i]->Get(identity());
-      if (current_space->free_list()->Available() < limit) {
-        // Space has not reached its limit. Try to get some memory.
-        spaces_need_memory = true;
-        node = TryRemoveMemory(limit - current_space->free_list()->Available());
-        if (node != nullptr) {
-          CHECK(current_space->identity() == identity());
-          current_space->AddMemory(node->address(), node->size());
-        } else {
-          memory_available = false;
-          break;
-        }
-      }
-    }
-  }
-}
-
-
 void PagedSpace::RefillFreeList() {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   FreeList* free_list = nullptr;
@@ -1075,7 +1070,6 @@
   }
 }
 
-
 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
@@ -1109,8 +1103,6 @@
   DCHECK(other->top() == nullptr);
   DCHECK(other->limit() == nullptr);
 
-  DCHECK(other->end_of_unswept_pages_ == nullptr);
-
   AccountCommitted(other->CommittedMemory());
 
   // Move over pages.
@@ -1136,8 +1128,7 @@
   return size;
 }
 
-
-bool PagedSpace::ContainsSafe(Address addr) {
+bool PagedSpace::ContainsSlow(Address addr) {
   Page* p = Page::FromAddress(addr);
   PageIterator iterator(this);
   while (iterator.has_next()) {
@@ -1229,21 +1220,16 @@
 }
 
 
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());
 
-  if (page->WasSwept()) {
+  if (evict_free_list_items) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
   }
 
-  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
-    heap()->decrement_scan_on_scavenge_pages();
-    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1279,7 +1265,7 @@
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSwept());
+    CHECK(page->SweepingDone());
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -1327,8 +1313,6 @@
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
 
-  int target_semispace_capacity = heap()->TargetSemiSpaceSize();
-
   size_t size = 2 * reserved_semispace_capacity;
   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
       size, size, &reservation_);
@@ -1357,19 +1341,15 @@
   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
-                  target_semispace_capacity, maximum_semispace_capacity);
+                  maximum_semispace_capacity);
   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity, target_semispace_capacity,
-                    maximum_semispace_capacity);
+                    initial_semispace_capacity, maximum_semispace_capacity);
   if (!to_space_.Commit()) {
     return false;
   }
   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
 
   start_ = chunk_base_;
-  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
 
   ResetAllocationInfo();
 
@@ -1416,7 +1396,7 @@
     if (!from_space_.GrowTo(new_capacity)) {
       // If we managed to grow to-space but couldn't grow from-space,
       // attempt to shrink to-space.
-      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
+      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
         CHECK(false);
@@ -1427,36 +1407,6 @@
 }
 
 
-bool NewSpace::GrowOnePage() {
-  if (TotalCapacity() == MaximumCapacity()) return false;
-  int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
-  if (to_space_.GrowTo(new_capacity)) {
-    // Only grow from space if we managed to grow to-space and the from space
-    // is actually committed.
-    if (from_space_.is_committed()) {
-      if (!from_space_.GrowTo(new_capacity)) {
-        // If we managed to grow to-space but couldn't grow from-space,
-        // attempt to shrink to-space.
-        if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
-          // We are in an inconsistent state because we could not
-          // commit/uncommit memory from new space.
-          CHECK(false);
-        }
-        return false;
-      }
-    } else {
-      if (!from_space_.SetTotalCapacity(new_capacity)) {
-        // Can't really happen, but better safe than sorry.
-        CHECK(false);
-      }
-    }
-    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-    return true;
-  }
-  return false;
-}
-
-
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
@@ -1467,7 +1417,7 @@
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
       // If we managed to shrink to-space but couldn't shrink from
       // space, attempt to grow to-space again.
-      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
+      if (!to_space_.GrowTo(from_space_.current_capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
         CHECK(false);
@@ -1547,8 +1497,7 @@
     Address high = to_space_.page_high();
     Address new_top = allocation_info_.top() + size_in_bytes;
     allocation_info_.set_limit(Min(new_top, high));
-  } else if (inline_allocation_observers_paused_ ||
-             top_on_previous_step_ == 0) {
+  } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
     // Normal limit is the end of the current page.
     allocation_info_.set_limit(to_space_.page_high());
   } else {
@@ -1564,29 +1513,10 @@
 
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  if (NewSpacePage::IsAtStart(top)) {
-    // The current page is already empty. Don't try to make another.
-
-    // We should only get here if someone asks to allocate more
-    // than what can be stored in a single page.
-    // TODO(gc): Change the limit on new-space allocation to prevent this
-    // from happening (all such allocations should go directly to LOSpace).
-    return false;
-  }
+  DCHECK(!NewSpacePage::IsAtStart(top));
   if (!to_space_.AdvancePage()) {
-    // Check if we reached the target capacity yet. If not, try to commit a page
-    // and continue.
-    if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
-        GrowOnePage()) {
-      if (!to_space_.AdvancePage()) {
-        // It doesn't make sense that we managed to commit a page, but can't use
-        // it.
-        CHECK(false);
-      }
-    } else {
-      // Failed to get a new page in to-space.
-      return false;
-    }
+    // No more pages left to advance.
+    return false;
   }
 
   // Clear remainder of current page.
@@ -1648,9 +1578,9 @@
 
 
 void NewSpace::StartNextInlineAllocationStep() {
-  if (!inline_allocation_observers_paused_) {
+  if (!allocation_observers_paused_) {
     top_on_previous_step_ =
-        inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+        allocation_observers_->length() ? allocation_info_.top() : 0;
     UpdateInlineAllocationLimit(0);
   }
 }
@@ -1658,44 +1588,36 @@
 
 intptr_t NewSpace::GetNextInlineAllocationStepSize() {
   intptr_t next_step = 0;
-  for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-    InlineAllocationObserver* o = inline_allocation_observers_[i];
+  for (int i = 0; i < allocation_observers_->length(); ++i) {
+    AllocationObserver* o = (*allocation_observers_)[i];
     next_step = next_step ? Min(next_step, o->bytes_to_next_step())
                           : o->bytes_to_next_step();
   }
-  DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+  DCHECK(allocation_observers_->length() == 0 || next_step != 0);
   return next_step;
 }
 
-
-void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
-  inline_allocation_observers_.Add(observer);
+void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
+  Space::AddAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::RemoveInlineAllocationObserver(
-    InlineAllocationObserver* observer) {
-  bool removed = inline_allocation_observers_.RemoveElement(observer);
-  // Only used in assertion. Suppress unused variable warning.
-  static_cast<void>(removed);
-  DCHECK(removed);
+void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
+  Space::RemoveAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::PauseInlineAllocationObservers() {
+void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
-  inline_allocation_observers_paused_ = true;
+  Space::PauseAllocationObservers();
   top_on_previous_step_ = 0;
   UpdateInlineAllocationLimit(0);
 }
 
-
-void NewSpace::ResumeInlineAllocationObservers() {
+void NewSpace::ResumeAllocationObservers() {
   DCHECK(top_on_previous_step_ == 0);
-  inline_allocation_observers_paused_ = false;
+  Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
 
@@ -1704,9 +1626,9 @@
                                     Address soon_object, size_t size) {
   if (top_on_previous_step_) {
     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
-    for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-      inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
-                                                            soon_object, size);
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
+                                                  size);
     }
     top_on_previous_step_ = new_top;
   }
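// [Editor's sketch — illustrative only, not part of this CL.] The
// allocation-observer machinery above, reduced to standalone C++. All names
// below are invented for the sketch; only the pattern mirrors the CL.
#include <algorithm>
#include <cstdint>
#include <vector>

class ObserverSketch {
 public:
  explicit ObserverSketch(intptr_t step) : bytes_to_next_step_(step) {}
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
  // Invoked by the space with the bytes allocated since the previous step.
  virtual void AllocationStep(int bytes_allocated) {}
  virtual ~ObserverSketch() = default;

 private:
  intptr_t bytes_to_next_step_;
};

// Mirrors InlineAllocationStep: every observer sees the same delta.
void NotifyAll(const std::vector<ObserverSketch*>& observers, int bytes) {
  for (ObserverSketch* o : observers) o->AllocationStep(bytes);
}

// Mirrors GetNextInlineAllocationStepSize: the next limit is set after the
// smallest interval requested by any observer.
intptr_t NextStep(const std::vector<ObserverSketch*>& observers) {
  intptr_t next = 0;
  for (ObserverSketch* o : observers) {
    next = next ? std::min(next, o->bytes_to_next_step())
                : o->bytes_to_next_step();
  }
  return next;
}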
@@ -1771,68 +1693,56 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
+void SemiSpace::SetUp(Address start, int initial_capacity,
                       int maximum_capacity) {
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
-  // memory of size 'capacity' when set up, and does not grow or shrink
-  // otherwise.  In the mark-compact collector, the memory region of the from
-  // space is used as the marking stack. It requires contiguous memory
-  // addresses.
-  DCHECK(maximum_capacity >= Page::kPageSize);
-  DCHECK(initial_capacity <= target_capacity);
-  DCHECK(target_capacity <= maximum_capacity);
-  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
-  total_capacity_ = initial_capacity;
-  target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
-  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+  DCHECK_GE(maximum_capacity, Page::kPageSize);
+  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+  current_capacity_ = minimum_capacity_;
+  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
   start_ = start;
-  address_mask_ = ~(maximum_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
 }
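// [Editor's sketch — illustrative only, not part of this CL.] The RoundDown
// in SetUp clamps capacities to whole pages. This assumes a power-of-two
// alignment, which Page::kPageSize satisfies; the constant below is a
// placeholder, not the real page size.
#include <cstdint>

constexpr intptr_t kPageSketch = intptr_t{1} << 20;

constexpr intptr_t RoundDownSketch(intptr_t x, intptr_t alignment) {
  return x & ~(alignment - 1);  // drop the partial-page remainder
}

static_assert(RoundDownSketch(kPageSketch + 123, kPageSketch) == kPageSketch,
              "a trailing partial page is discarded");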
 
 
 void SemiSpace::TearDown() {
-  start_ = NULL;
-  total_capacity_ = 0;
+  start_ = nullptr;
+  current_capacity_ = 0;
 }
 
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  int pages = total_capacity_ / Page::kPageSize;
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_, total_capacity_, executable())) {
+          start_, current_capacity_, executable())) {
     return false;
   }
-  AccountCommitted(total_capacity_);
+  AccountCommitted(current_capacity_);
 
   NewSpacePage* current = anchor();
-  for (int i = 0; i < pages; i++) {
+  const int num_pages = current_capacity_ / Page::kPageSize;
+  for (int i = 0; i < num_pages; i++) {
     NewSpacePage* new_page =
         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
     new_page->InsertAfter(current);
     current = new_page;
   }
-
-  SetCapacity(total_capacity_);
-  committed_ = true;
   Reset();
+
+  set_current_capacity(current_capacity_);
+  committed_ = true;
   return true;
 }
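// [Editor's sketch — illustrative only, not part of this CL.] Commit() above
// threads each freshly committed page onto a circular doubly-linked list
// rooted at the anchor. A minimal version of that insertion, with invented
// names:
struct NodeSketch {
  NodeSketch* next = this;  // an empty ring points at itself
  NodeSketch* prev = this;
  void InsertAfter(NodeSketch* other) {
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};
// Usage mirroring the loop in Commit(): start with current = anchor, then for
// each new page do page->InsertAfter(current); current = page;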
 
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  Address start = start_ + maximum_total_capacity_ - total_capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
-                                                            total_capacity_)) {
+  Address start = start_ + maximum_capacity_ - current_capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+          start, current_capacity_)) {
     return false;
   }
-  AccountUncommitted(total_capacity_);
+  AccountUncommitted(current_capacity_);
 
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
@@ -1857,23 +1767,23 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
-  DCHECK(new_capacity <= maximum_total_capacity_);
-  DCHECK(new_capacity > total_capacity_);
-  int pages_before = total_capacity_ / Page::kPageSize;
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_LE(new_capacity, maximum_capacity_);
+  DCHECK_GT(new_capacity, current_capacity_);
+  int pages_before = current_capacity_ / Page::kPageSize;
   int pages_after = new_capacity / Page::kPageSize;
 
-  size_t delta = new_capacity - total_capacity_;
+  size_t delta = new_capacity - current_capacity_;
 
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_ + total_capacity_, delta, executable())) {
+          start_ + current_capacity_, delta, executable())) {
     return false;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
-  SetCapacity(new_capacity);
+  set_current_capacity(new_capacity);
   NewSpacePage* last_page = anchor()->prev_page();
-  DCHECK(last_page != anchor());
+  DCHECK_NE(last_page, anchor());
   for (int i = pages_before; i < pages_after; i++) {
     Address page_address = start_ + i * Page::kPageSize;
     NewSpacePage* new_page =
@@ -1890,11 +1800,11 @@
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
-  DCHECK(new_capacity >= initial_total_capacity_);
-  DCHECK(new_capacity < total_capacity_);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_GE(new_capacity, minimum_capacity_);
+  DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
-    size_t delta = total_capacity_ - new_capacity;
+    size_t delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
 
     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
@@ -1911,37 +1821,23 @@
     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
   }
 
-  SetCapacity(new_capacity);
+  set_current_capacity(new_capacity);
 
   return true;
 }
 
-
-bool SemiSpace::SetTotalCapacity(int new_capacity) {
-  CHECK(!is_committed());
-  if (new_capacity >= initial_total_capacity_ &&
-      new_capacity <= maximum_total_capacity_) {
-    total_capacity_ = new_capacity;
-    return true;
-  }
-  return false;
-}
-
-
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes
-  // when we swap.
+  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
-  bool becomes_to_space = (id_ == kFromSpace);
-  id_ = becomes_to_space ? kToSpace : kFromSpace;
-  NewSpacePage* page = anchor_.next_page();
-  while (page != &anchor_) {
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
-    if (becomes_to_space) {
+    if (id_ == kToSpace) {
       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
       page->SetFlag(MemoryChunk::IN_TO_SPACE);
       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
@@ -1950,48 +1846,42 @@
       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
     }
-    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
-    page = page->next_page();
   }
 }
 
 
 void SemiSpace::Reset() {
-  DCHECK(anchor_.next_page() != &anchor_);
+  DCHECK_NE(anchor_.next_page(), &anchor_);
   current_page_ = anchor_.next_page();
 }
 
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
-  DCHECK(from->anchor_.next_page() != &from->anchor_);
-  DCHECK(to->anchor_.next_page() != &to->anchor_);
+  DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
+  DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
 
-  // Swap bits.
-  SemiSpace tmp = *from;
-  *from = *to;
-  *to = tmp;
+  intptr_t saved_to_space_flags = to->current_page()->GetFlags();
 
-  // Fixup back-pointers to the page list anchor now that its address
-  // has changed.
-  // Swap to/from-space bits on pages.
-  // Copy GC flags from old active space (from-space) to new (to-space).
-  intptr_t flags = from->current_page()->GetFlags();
-  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  // We swap all properties but id_.
+  std::swap(from->current_capacity_, to->current_capacity_);
+  std::swap(from->maximum_capacity_, to->maximum_capacity_);
+  std::swap(from->minimum_capacity_, to->minimum_capacity_);
+  std::swap(from->start_, to->start_);
+  std::swap(from->age_mark_, to->age_mark_);
+  std::swap(from->committed_, to->committed_);
+  std::swap(from->anchor_, to->anchor_);
+  std::swap(from->current_page_, to->current_page_);
 
-  from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::SetCapacity(int new_capacity) {
-  total_capacity_ = new_capacity;
+  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  from->FixPagesFlags(0, 0);
 }
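// [Editor's sketch — illustrative only, not part of this CL.] Swap exchanges
// every member except id_, so the kFromSpace/kToSpace labels stay attached to
// the C++ objects while the backing memory, capacities, and page list change
// hands. Minimal equivalent with invented names:
#include <utility>

struct SemiSpaceSketch {
  int id;        // not swapped: identity stays with the object
  void* start;   // swapped
  int capacity;  // swapped
};

void SwapSketch(SemiSpaceSketch* from, SemiSpaceSketch* to) {
  std::swap(from->start, to->start);
  std::swap(from->capacity, to->capacity);
  // from->id and to->id are intentionally left untouched.
}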
 
 
 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -2011,7 +1901,7 @@
   NewSpacePage* page = anchor_.next_page();
   CHECK(anchor_.semi_space() == this);
   while (page != &anchor_) {
-    CHECK(page->semi_space() == this);
+    CHECK_EQ(page->semi_space(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -2030,8 +1920,7 @@
       // TODO(gc): Check that the live_bytes_count_ field matches the
       // black marking on the page (if we make it match in new-space).
     }
-    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    CHECK(page->prev_page()->next_page() == page);
+    CHECK_EQ(page->prev_page()->next_page(), page);
     page = page->next_page();
   }
 }
@@ -2048,7 +1937,7 @@
   // or end address is on a later page in the linked list of
   // semi-space pages.
   if (page == end_page) {
-    CHECK(start <= end);
+    CHECK_LE(start, end);
   } else {
     while (page != end_page) {
       page = page->next_page();
@@ -2313,8 +2202,7 @@
     }
     prev_node = cur_node;
   }
-  DCHECK_EQ(p->available_in_free_list(type_), sum);
-  p->add_available_in_free_list(type_, -sum);
+  p->add_available_in_free_list(-sum);
   available_ -= sum;
   return sum;
 }
@@ -2337,7 +2225,7 @@
   Page* page = Page::FromAddress(node->address());
   while ((node != nullptr) && !page->CanAllocate()) {
     available_ -= node->size();
-    page->add_available_in_free_list(type_, -(node->Size()));
+    page->add_available_in_free_list(-(node->Size()));
     node = node->next();
   }
 
@@ -2392,7 +2280,7 @@
       }
       // For evacuation candidates we continue.
       if (!page_for_node->CanAllocate()) {
-        page_for_node->add_available_in_free_list(type_, -size);
+        page_for_node->add_available_in_free_list(-size);
         continue;
       }
       // Otherwise we have a large enough node and can return.
@@ -2429,14 +2317,10 @@
   }
 }
 
-
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner),
-      wasted_bytes_(0),
-      small_list_(this, kSmall),
-      medium_list_(this, kMedium),
-      large_list_(this, kLarge),
-      huge_list_(this, kHuge) {
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
+  }
   Reset();
 }
 
@@ -2456,10 +2340,10 @@
   wasted_bytes_ += wasted_bytes;
   other->wasted_bytes_ = 0;
 
-  usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
-  usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
-  usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
-  usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    usable_bytes += category_[i].Concatenate(
+        other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
+  }
 
   if (!other->owner()->is_local()) other->mutex()->Unlock();
   if (!owner()->is_local()) mutex_.Unlock();
@@ -2468,10 +2352,9 @@
 
 
 void FreeList::Reset() {
-  small_list_.Reset();
-  medium_list_.Reset();
-  large_list_.Reset();
-  huge_list_.Reset();
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].Reset();
+  }
   ResetStats();
 }
 
@@ -2485,7 +2368,7 @@
 
   // Early return to drop too-small blocks on the floor.
   if (size_in_bytes <= kSmallListMin) {
-    page->add_non_available_small_blocks(size_in_bytes);
+    page->add_wasted_memory(size_in_bytes);
     wasted_bytes_ += size_in_bytes;
     return size_in_bytes;
   }
@@ -2493,19 +2376,9 @@
   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
-  if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_small_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_medium_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_large_free_list(size_in_bytes);
-  } else {
-    huge_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_huge_free_list(size_in_bytes);
-  }
+  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+  category_[type].Free(free_space, size_in_bytes);
+  page->add_available_in_free_list(size_in_bytes);
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return 0;
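// [Editor's sketch — illustrative only, not part of this CL.] Free() above in
// miniature: blocks no larger than a minimum size are written off as wasted
// memory, everything else goes onto the list selected by size. The thresholds
// below are placeholders, not the real kSmallListMin/k*ListMax values.
#include <cstddef>

enum CatSketch { kSmallCat, kMediumCat, kLargeCat, kHugeCat, kNumCats };

CatSketch SelectCat(size_t size) {
  if (size <= 255) return kSmallCat;
  if (size <= 2047) return kMediumCat;
  if (size <= 16383) return kLargeCat;
  return kHugeCat;
}

// Returns the number of bytes dropped on the floor (0 if the block was kept).
size_t FreeSketch(size_t size, size_t* wasted, size_t available[kNumCats]) {
  const size_t kMinBlock = 32;  // placeholder for kSmallListMin
  if (size <= kMinBlock) {
    *wasted += size;  // too small to be worth tracking
    return size;
  }
  available[SelectCat(size)] += size;
  return 0;
}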
@@ -2516,7 +2389,7 @@
   FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
   if (node != nullptr) {
     Page::FromAddress(node->address())
-        ->add_available_in_free_list(category, -(*node_size));
+        ->add_available_in_free_list(-(*node_size));
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   return node;
@@ -2527,50 +2400,37 @@
   FreeSpace* node = nullptr;
   Page* page = nullptr;
 
-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = FindNodeIn(kSmall, node_size);
+  // First try the allocation fast path: try to allocate the minimum element
+  // size of a free list category. This operation is constant time.
+  FreeListCategoryType type =
+      SelectFastAllocationFreeListCategoryType(size_in_bytes);
+  for (int i = type; i < kHuge; i++) {
+    node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
     if (node != nullptr) return node;
   }
 
-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = FindNodeIn(kMedium, node_size);
-    if (node != nullptr) return node;
-  }
-
-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = FindNodeIn(kLarge, node_size);
-    if (node != nullptr) return node;
-  }
-
-  node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+  // Next search the huge list for free list nodes. This takes linear time in
+  // the number of huge elements.
+  node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
   if (node != nullptr) {
     page = Page::FromAddress(node->address());
-    page->add_available_in_large_free_list(-(*node_size));
+    page->add_available_in_free_list(-(*node_size));
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
     return node;
   }
 
-  if (size_in_bytes <= kSmallListMax) {
-    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kMediumListMax) {
-    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kLargeListMax) {
-    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-    }
+  // We need a huge block of memory, but we didn't find anything in the huge
+  // list.
+  if (type == kHuge) return nullptr;
+
+  // Now search the best fitting free list for a node that has at least the
+  // requested size. This takes linear time in the number of elements.
+  type = SelectFreeListCategoryType(size_in_bytes);
+  node = category_[type].PickNodeFromList(size_in_bytes, node_size);
+  if (node != nullptr) {
+    DCHECK(size_in_bytes <= *node_size);
+    page = Page::FromAddress(node->address());
+    page->add_available_in_free_list(-(*node_size));
   }
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
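// [Editor's sketch — illustrative only, not part of this CL.] The three-stage
// search above, reduced to vectors of block sizes: (1) O(1) fast path over
// categories whose minimum block already fits, (2) linear scan of the huge
// list, (3) linear scan of the best-fitting list. Thresholds and names are
// invented for the sketch.
#include <array>
#include <cstddef>
#include <vector>

enum { kSmallIdx, kMediumIdx, kLargeIdx, kHugeIdx, kNumIdx };
using Lists = std::array<std::vector<size_t>, kNumIdx>;

// Mirrors SelectFreeListCategoryType: the list this block size belongs to.
int BestFit(size_t s) {
  return s <= 255 ? kSmallIdx : s <= 2047 ? kMediumIdx
                  : s <= 16383 ? kLargeIdx : kHugeIdx;
}

// Mirrors SelectFastAllocationFreeListCategoryType: the smallest category
// whose minimum block size is still large enough to satisfy the request.
int FastFit(size_t s) {
  return s <= 128 ? kSmallIdx : s <= 1024 ? kMediumIdx
                  : s <= 8192 ? kLargeIdx : kHugeIdx;
}

// Pops a block of >= size bytes; returns 0 when the lists cannot satisfy it.
size_t FindNodeSketch(Lists& lists, size_t size) {
  for (int i = FastFit(size); i < kHugeIdx; i++) {  // (1) constant time
    if (!lists[i].empty()) {
      size_t n = lists[i].back();
      lists[i].pop_back();
      return n;
    }
  }
  std::vector<size_t>& huge = lists[kHugeIdx];  // (2) linear in huge list
  for (size_t j = 0; j < huge.size(); j++) {
    if (huge[j] >= size) {
      size_t n = huge[j];
      huge.erase(huge.begin() + j);
      return n;
    }
  }
  if (FastFit(size) == kHugeIdx) return 0;  // huge request, nothing found
  std::vector<size_t>& best = lists[BestFit(size)];  // (3) linear best fit
  for (size_t j = 0; j < best.size(); j++) {
    if (best[j] >= size) {
      size_t n = best[j];
      best.erase(best.begin() + j);
      return n;
    }
  }
  return 0;
}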
@@ -2633,6 +2493,7 @@
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
+  owner_->AllocationStep(new_node->address(), size_in_bytes);
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -2683,29 +2544,30 @@
 
 
 intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+  intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
   if (sum < p->area_size()) {
-    sum += small_list_.EvictFreeListItemsInList(p) +
-           medium_list_.EvictFreeListItemsInList(p) +
-           large_list_.EvictFreeListItemsInList(p);
+    for (int i = kFirstCategory; i <= kLarge; i++) {
+      sum += category_[i].EvictFreeListItemsInList(p);
+    }
   }
   return sum;
 }
 
 
 bool FreeList::ContainsPageFreeListItems(Page* p) {
-  return huge_list_.EvictFreeListItemsInList(p) ||
-         small_list_.EvictFreeListItemsInList(p) ||
-         medium_list_.EvictFreeListItemsInList(p) ||
-         large_list_.EvictFreeListItemsInList(p);
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    if (category_[i].EvictFreeListItemsInList(p)) {
+      return true;
+    }
+  }
+  return false;
 }
 
 
 void FreeList::RepairLists(Heap* heap) {
-  small_list_.RepairFreeList(heap);
-  medium_list_.RepairFreeList(heap);
-  large_list_.RepairFreeList(heap);
-  huge_list_.RepairFreeList(heap);
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].RepairFreeList(heap);
+  }
 }
 
 
@@ -2740,8 +2602,12 @@
 
 
 bool FreeList::IsVeryLong() {
-  return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
-         large_list_.IsVeryLong() || huge_list_.IsVeryLong();
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    if (category_[i].IsVeryLong()) {
+      return true;
+    }
+  }
+  return false;
 }
 
 
@@ -2749,10 +2615,10 @@
 // on the free list, so it should not be called if FreeListLength returns
 // kVeryLongFreeList.
 intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = small_list_.SumFreeList();
-  sum += medium_list_.SumFreeList();
-  sum += large_list_.SumFreeList();
-  sum += huge_list_.SumFreeList();
+  intptr_t sum = 0;
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    sum += category_[i].SumFreeList();
+  }
   return sum;
 }
 #endif
@@ -2791,7 +2657,7 @@
   PageIterator iterator(this);
   while (iterator.has_next()) {
     Page* page = iterator.next();
-    int size = static_cast<int>(page->non_available_small_blocks());
+    int size = static_cast<int>(page->wasted_memory());
     if (size == 0) continue;
     Address address = page->OffsetToAddress(Page::kPageSize - size);
     heap()->CreateFillerObjectAt(address, size);
@@ -2837,6 +2703,8 @@
 
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  const int kMaxPagesToSweep = 1;
+
   // Allocation in this space has failed.
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -2851,10 +2719,13 @@
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+    int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
+                                               size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
-    object = free_list_.Allocate(size_in_bytes);
-    if (object != nullptr) return object;
+    if (max_freed >= size_in_bytes) {
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != nullptr) return object;
+    }
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -3139,6 +3010,7 @@
   }
 
   heap()->incremental_marking()->OldSpaceStep(object_size);
+  AllocationStep(object->address(), object_size);
   return object;
 }
 
@@ -3252,11 +3124,6 @@
 }
 
 
-bool LargeObjectSpace::Contains(Address address) {
-  return FindPage(address) != NULL;
-}
-
-
 #ifdef VERIFY_HEAP
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index a8102ca..c0d399f 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -19,8 +19,20 @@
 namespace v8 {
 namespace internal {
 
+class AllocationInfo;
+class AllocationObserver;
+class CompactionSpace;
 class CompactionSpaceCollection;
+class FreeList;
 class Isolate;
+class MemoryAllocator;
+class MemoryChunk;
+class PagedSpace;
+class SemiSpace;
+class SkipList;
+class SlotsBuffer;
+class SlotSet;
+class Space;
 
 // -----------------------------------------------------------------------------
 // Heap structures:
@@ -96,13 +108,6 @@
 #define DCHECK_MAP_PAGE_INDEX(index) \
   DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
 
-class AllocationInfo;
-class CompactionSpace;
-class FreeList;
-class MemoryAllocator;
-class MemoryChunk;
-class PagedSpace;
-class Space;
 
 class MarkBit {
  public:
@@ -284,9 +289,6 @@
 };
 
 
-class SkipList;
-class SlotsBuffer;
-
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
@@ -295,10 +297,8 @@
  public:
   enum MemoryChunkFlags {
     IS_EXECUTABLE,
-    ABOUT_TO_BE_FREED,
     POINTERS_TO_HERE_ARE_INTERESTING,
     POINTERS_FROM_HERE_ARE_INTERESTING,
-    SCAN_ON_SCAVENGE,
     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
     IN_TO_SPACE,    // All pages in new space have one of these two set.
     NEW_SPACE_BELOW_AGE_MARK,
@@ -307,10 +307,6 @@
     NEVER_EVACUATE,  // May contain immortal immutables.
     POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
-    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
-    // otherwise marking bits are still intact.
-    WAS_SWEPT,
-
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
     // Even if the mutator writes to them they will be kept black and a white
@@ -323,7 +319,7 @@
     // candidates selection cycle.
     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
 
-    // This flag is inteded to be used for testing.
+    // This flag is intended to be used for testing.
     NEVER_ALLOCATE_ON_PAGE,
 
     // The memory chunk is already logically freed, however the actual freeing
@@ -352,16 +348,14 @@
   };
 
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
-  //   not be performed on that page.
-  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
-  //   not touch the page memory anymore.
-  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  //   not be performed on that page. Sweeper threads that are done with their
+  //   work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
-  enum ParallelSweepingState {
+  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  enum ConcurrentSweepingState {
     kSweepingDone,
-    kSweepingFinalize,
+    kSweepingPending,
     kSweepingInProgress,
-    kSweepingPending
   };
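// [Editor's sketch — illustrative only, not part of this CL.] The three
// sweeping states as a tiny state machine; std::atomic stands in for the
// AtomicValue V8 keeps on each page, and all names are invented.
#include <atomic>

enum class SweepStateSketch { kDone, kPending, kInProgress };

struct PageSketch {
  std::atomic<SweepStateSketch> state{SweepStateSketch::kDone};
};

// A sweeper thread claims a pending page, sweeps it, then publishes kDone;
// pages already done or claimed by another thread are skipped.
bool TrySweepSketch(PageSketch* p) {
  SweepStateSketch expected = SweepStateSketch::kPending;
  if (!p->state.compare_exchange_strong(expected,
                                        SweepStateSketch::kInProgress)) {
    return false;
  }
  // ... sweep the page's free memory into the free list ...
  p->state.store(SweepStateSketch::kDone);
  return true;
}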
 
   // Every n write barrier invocations we go to runtime even though
@@ -396,31 +390,32 @@
       + 2 * kPointerSize          // base::VirtualMemory reservation_
       + kPointerSize              // Address owner_
       + kPointerSize              // Heap* heap_
-      + kIntSize;                 // int store_buffer_counter_
+      + kIntSize;                 // int progress_bar_
 
   static const size_t kSlotsBufferOffset =
       kLiveBytesOffset + kIntSize;  // int live_byte_count_
 
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
+      + kPointerSize                     // SlotSet* old_to_new_slots_;
+      + kPointerSize                     // SlotSet* old_to_old_slots_;
       + kPointerSize;                    // SkipList* skip_list_;
 
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
-      + kIntSize          // int progress_bar_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
       + kPointerSize      // AtomicValue parallel_compaction_
-      + 5 * kPointerSize  // AtomicNumber free-list statistics
+      + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize;     // AtomicValue prev_chunk_
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
-  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+  static const size_t kHeaderSize = kMinHeaderSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -435,30 +430,16 @@
 
   static const int kFlagsOffset = kPointerSize;
 
-  static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
 
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
   }
 
-  static const MemoryChunk* FromAddress(const byte* a) {
-    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
-                                                ~kAlignmentMask);
-  }
-
-  static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
-    MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
-  }
-
-  // Only works for addresses in pointer spaces, not data or code spaces.
   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
-  static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
-    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
-  }
-
   static inline void UpdateHighWaterMark(Address mark) {
     if (mark == nullptr) return;
     // Need to subtract one from the mark because when a chunk is full the
@@ -477,144 +458,38 @@
 
   bool is_valid() { return address() != NULL; }
 
-  MemoryChunk* next_chunk() { return next_chunk_.Value(); }
-
-  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
-
-  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
-
-  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
-
-  Space* owner() const {
-    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
-        kPageHeaderTag) {
-      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
-                                      kPageHeaderTag);
-    } else {
-      return NULL;
-    }
-  }
-
-  void set_owner(Space* space) {
-    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
-    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
-    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
-           kPageHeaderTag);
-  }
-
-  base::VirtualMemory* reserved_memory() { return &reservation_; }
-
-  void set_reserved_memory(base::VirtualMemory* reservation) {
-    DCHECK_NOT_NULL(reservation);
-    reservation_.TakeControl(reservation);
-  }
-
-  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
-  void initialize_scan_on_scavenge(bool scan) {
-    if (scan) {
-      SetFlag(SCAN_ON_SCAVENGE);
-    } else {
-      ClearFlag(SCAN_ON_SCAVENGE);
-    }
-  }
-  inline void set_scan_on_scavenge(bool scan);
-
-  int store_buffer_counter() { return store_buffer_counter_; }
-  void set_store_buffer_counter(int counter) {
-    store_buffer_counter_ = counter;
-  }
+  base::Mutex* mutex() { return mutex_; }
 
   bool Contains(Address addr) {
     return addr >= area_start() && addr < area_end();
   }
 
-  // Checks whether addr can be a limit of addresses in this page.
-  // It's a limit if it's in the page, or if it's just after the
-  // last byte of the page.
+  // Checks whether |addr| can be a limit of addresses in this page. It's a
+  // limit if it's in the page, or if it's just after the last byte of the page.
   bool ContainsLimit(Address addr) {
     return addr >= area_start() && addr <= area_end();
   }
 
-  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
-  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
-  void SetFlagTo(int flag, bool value) {
-    if (value) {
-      SetFlag(flag);
-    } else {
-      ClearFlag(flag);
-    }
-  }
-
-  bool IsFlagSet(int flag) {
-    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
-  }
-
-  // Set or clear multiple flags at a time. The flags in the mask
-  // are set to the value in "flags", the rest retain the current value
-  // in flags_.
-  void SetFlags(intptr_t flags, intptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
-  }
-
-  // Return all current flags.
-  intptr_t GetFlags() { return flags_; }
-
-  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
-    return parallel_sweeping_;
+  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+    return concurrent_sweeping_;
   }
 
   AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
     return parallel_compaction_;
   }
 
-  bool TryLock() { return mutex_->TryLock(); }
-
-  base::Mutex* mutex() { return mutex_; }
-
-  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
-  // progress. In particular, when we know that right before this call a
-  // sweeper thread was sweeping this page.
-  void WaitUntilSweepingCompleted() {
-    mutex_->Lock();
-    mutex_->Unlock();
-    DCHECK(SweepingCompleted());
-  }
-
-  bool SweepingCompleted() {
-    return parallel_sweeping_state().Value() <= kSweepingFinalize;
-  }
-
-  // Manage live byte count (count of bytes known to be live,
-  // because they are marked black).
-  void ResetLiveBytes() {
-    if (FLAG_gc_verbose) {
-      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
-             live_byte_count_);
-    }
-    live_byte_count_ = 0;
-  }
-
-  void IncrementLiveBytes(int by) {
-    if (FLAG_gc_verbose) {
-      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
-             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
-             live_byte_count_ + by);
-    }
-    live_byte_count_ += by;
-    DCHECK_GE(live_byte_count_, 0);
-    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
-  }
+  // Manage live byte count, i.e., count of bytes in black objects.
+  inline void ResetLiveBytes();
+  inline void IncrementLiveBytes(int by);
 
   int LiveBytes() {
-    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+    DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
     return live_byte_count_;
   }
 
   void SetLiveBytes(int live_bytes) {
     DCHECK_GE(live_bytes, 0);
-    DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+    DCHECK_LE(static_cast<size_t>(live_bytes), size_);
     live_byte_count_ = live_bytes;
   }
 
@@ -626,6 +501,35 @@
     write_barrier_counter_ = counter;
   }
 
+  size_t size() const { return size_; }
+
+  inline Heap* heap() const { return heap_; }
+
+  inline SkipList* skip_list() { return skip_list_; }
+
+  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+
+  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+
+  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+
+  inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
+  inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+
+  void AllocateOldToNewSlots();
+  void ReleaseOldToNewSlots();
+  void AllocateOldToOldSlots();
+  void ReleaseOldToOldSlots();
+
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() { return static_cast<int>(area_end() - area_start()); }
+
+  bool CommitArea(size_t requested);
+
+  // Approximate amount of physical memory committed for this chunk.
+  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+
   int progress_bar() {
     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
     return progress_bar_;
@@ -643,35 +547,10 @@
     }
   }
 
-  size_t size() const { return size_; }
-
-  void set_size(size_t size) { size_ = size; }
-
-  void SetArea(Address area_start, Address area_end) {
-    area_start_ = area_start;
-    area_end_ = area_end;
-  }
-
-  Executability executable() {
-    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-  }
-
-  bool InNewSpace() {
-    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
-  }
-
-  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
-
-  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
-
-  // Markbits support
-
   inline Bitmap* markbits() {
     return Bitmap::FromAddress(address() + kHeaderSize);
   }
 
-  void PrintMarkbits() { markbits()->Print(); }
-
   inline uint32_t AddressToMarkbitIndex(Address addr) {
     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
   }
@@ -680,10 +559,24 @@
     return this->address() + (index << kPointerSizeLog2);
   }
 
-  void InsertAfter(MemoryChunk* other);
-  void Unlink();
+  void PrintMarkbits() { markbits()->Print(); }
 
-  inline Heap* heap() const { return heap_; }
+  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
+
+  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask are set to
+  // the value in "flags", the rest retain the current value in |flags_|.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
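// [Editor's sketch — illustrative only, not part of this CL.] The flag
// helpers above over a bare word, including the masked SetFlags that
// FixPagesFlags relies on: bits inside |mask| take the new value, bits
// outside it keep their current value.
#include <cstdint>

struct FlagWordSketch {
  uintptr_t bits = 0;
  void Set(int f) { bits |= uintptr_t{1} << f; }
  void Clear(int f) { bits &= ~(uintptr_t{1} << f); }
  bool IsSet(int f) const { return (bits & (uintptr_t{1} << f)) != 0; }
  void SetMasked(uintptr_t flags, uintptr_t mask) {
    bits = (bits & ~mask) | (flags & mask);
  }
};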
 
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
@@ -698,21 +591,9 @@
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
-  bool ShouldSkipEvacuationSlotRecording() {
-    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
-  }
-
-  inline SkipList* skip_list() { return skip_list_; }
-
-  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
-
-  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
-
-  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
-
   void MarkEvacuationCandidate() {
     DCHECK(!IsFlagSet(NEVER_EVACUATE));
-    DCHECK(slots_buffer_ == NULL);
+    DCHECK_NULL(slots_buffer_);
     SetFlag(EVACUATION_CANDIDATE);
   }
 
@@ -721,21 +602,62 @@
     ClearFlag(EVACUATION_CANDIDATE);
   }
 
-  Address area_start() { return area_start_; }
-  Address area_end() { return area_end_; }
-  int area_size() { return static_cast<int>(area_end() - area_start()); }
-  bool CommitArea(size_t requested);
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
 
-  // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
 
-  // Should be called when memory chunk is about to be freed.
-  void ReleaseAllocatedMemory();
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
+
+  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+
+  MemoryChunk* next_chunk() { return next_chunk_.Value(); }
+
+  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
+
+  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
+
+  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+        kPageHeaderTag) {
+      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+                                      kPageHeaderTag);
+    } else {
+      return nullptr;
+    }
+  }
+
+  void set_owner(Space* space) {
+    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+           kPageHeaderTag);
+  }
+
+  bool HasPageHeader() { return owner() != nullptr; }
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
 
  protected:
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
-                                 Executability executable, Space* owner);
+                                 Executability executable, Space* owner,
+                                 base::VirtualMemory* reservation);
+
+  // Should be called when memory chunk is about to be freed.
+  void ReleaseAllocatedMemory();
+
+  base::VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
   intptr_t flags_;
@@ -746,36 +668,45 @@
 
   // If the chunk needs to remember its memory reservation, it is stored here.
   base::VirtualMemory reservation_;
+
   // The identity of the owning space.  This is tagged as a failure pointer, but
   // no failure can be in an object, so this can be distinguished from any entry
   // in a fixed array.
   Address owner_;
+
   Heap* heap_;
-  // Used by the store buffer to keep track of which pages to mark scan-on-
-  // scavenge.
-  int store_buffer_counter_;
-  // Count of bytes marked black on page.
-  int live_byte_count_;
-  SlotsBuffer* slots_buffer_;
-  SkipList* skip_list_;
-  intptr_t write_barrier_counter_;
+
   // Used by the incremental marker to keep track of the scanning progress in
   // large objects that have a progress bar and are scanned in increments.
   int progress_bar_;
+
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+
+  SlotsBuffer* slots_buffer_;
+
+  // A single slot set for small pages (of size kPageSize) or an array of
+  // slot sets for large pages. In the latter case the number of entries in
+  // the array is ceil(size() / kPageSize).
+  SlotSet* old_to_new_slots_;
+  SlotSet* old_to_old_slots_;
+
+  SkipList* skip_list_;
+
+  intptr_t write_barrier_counter_;
+
   // Assuming the initial allocation on a page is sequential,
   // count the highest number of bytes ever allocated on the page.
   AtomicValue<intptr_t> high_water_mark_;
 
   base::Mutex* mutex_;
-  AtomicValue<ParallelSweepingState> parallel_sweeping_;
+
+  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
   AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
-  AtomicNumber<intptr_t> available_in_small_free_list_;
-  AtomicNumber<intptr_t> available_in_medium_free_list_;
-  AtomicNumber<intptr_t> available_in_large_free_list_;
-  AtomicNumber<intptr_t> available_in_huge_free_list_;
-  AtomicNumber<intptr_t> non_available_small_blocks_;
+  AtomicNumber<intptr_t> available_in_free_list_;
+  AtomicNumber<intptr_t> wasted_memory_;
 
   // next_chunk_ holds a pointer of type MemoryChunk
   AtomicValue<MemoryChunk*> next_chunk_;
@@ -789,9 +720,16 @@
   friend class MemoryChunkValidator;
 };
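// [Editor's sketch — illustrative only, not part of this CL.] Sizing the
// slot-set array described in the MemoryChunk comments above: one slot set
// per kPageSize-sized piece of the chunk, i.e. ceil(size / kPageSize)
// entries. The constant below is a placeholder, not the real page size.
#include <cstddef>

constexpr size_t kPageChunkSketch = size_t{1} << 20;

constexpr size_t SlotSetCountSketch(size_t chunk_size) {
  return (chunk_size + kPageChunkSketch - 1) / kPageChunkSketch;  // ceil
}

static_assert(SlotSetCountSketch(kPageChunkSketch) == 1,
              "a small page gets exactly one slot set");
static_assert(SlotSetCountSketch(kPageChunkSketch + 1) == 2,
              "a large page rounds up");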
 
+enum FreeListCategoryType {
+  kSmall,
+  kMedium,
+  kLarge,
+  kHuge,
 
-enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
-
+  kFirstCategory = kSmall,
+  kLastCategory = kHuge,
+  kNumberOfCategories = kLastCategory + 1
+};
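// [Editor's sketch — illustrative only, not part of this CL.] The
// kFirstCategory/kNumberOfCategories bounds make the enum iterable, which is
// what lets FreeList replace four hand-written member lists with loops over
// category_[i]. A standalone equivalent with invented names:
#include <cstdint>

enum CatRangeSketch { kA, kB, kC, kD, kFirstSketch = kA, kNumSketch = kD + 1 };

intptr_t SumAvailableSketch(const intptr_t available[kNumSketch]) {
  intptr_t sum = 0;
  for (int i = kFirstSketch; i < kNumSketch; i++) sum += available[i];
  return sum;
}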
 
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
@@ -809,6 +747,9 @@
     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
   }
 
+  // Only works for addresses in pointer spaces, not code space.
+  inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
   // Returns the page containing an allocation top. Because an allocation
   // top address can be the upper bound of the page, we need to subtract
   // kPointerSize from it first. The address ranges from
@@ -873,17 +814,24 @@
 
   void InitializeAsAnchor(PagedSpace* owner);
 
-  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
-  void SetWasSwept() { SetFlag(WAS_SWEPT); }
-  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+  // progress. In particular, when we know that right before this call a
+  // sweeper thread was sweeping this page.
+  void WaitUntilSweepingCompleted() {
+    mutex_->Lock();
+    mutex_->Unlock();
+    DCHECK(SweepingDone());
+  }
+
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }
 
   void ResetFreeListStatistics();
 
   int LiveBytesFromFreeList() {
-    return static_cast<int>(
-        area_size() - non_available_small_blocks() -
-        available_in_small_free_list() - available_in_medium_free_list() -
-        available_in_large_free_list() - available_in_huge_free_list());
+    return static_cast<int>(area_size() - wasted_memory() -
+                            available_in_free_list());
   }
 
 #define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
@@ -891,50 +839,11 @@
   void set_##name(type name) { name##_.SetValue(name); } \
   void add_##name(type name) { name##_.Increment(name); }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
 
 #undef FRAGMENTATION_STATS_ACCESSORS
 
-  void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
-    switch (type) {
-      case kSmall:
-        add_available_in_small_free_list(bytes);
-        break;
-      case kMedium:
-        add_available_in_medium_free_list(bytes);
-        break;
-      case kLarge:
-        add_available_in_large_free_list(bytes);
-        break;
-      case kHuge:
-        add_available_in_huge_free_list(bytes);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-
-  intptr_t available_in_free_list(FreeListCategoryType type) {
-    switch (type) {
-      case kSmall:
-        return available_in_small_free_list();
-      case kMedium:
-        return available_in_medium_free_list();
-      case kLarge:
-        return available_in_large_free_list();
-      case kHuge:
-        return available_in_huge_free_list();
-      default:
-        UNREACHABLE();
-    }
-    UNREACHABLE();
-    return 0;
-  }
-
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -965,7 +874,9 @@
 class Space : public Malloced {
  public:
   Space(Heap* heap, AllocationSpace id, Executability executable)
-      : heap_(heap),
+      : allocation_observers_(new List<AllocationObserver*>()),
+        allocation_observers_paused_(false),
+        heap_(heap),
         id_(id),
         executable_(executable),
         committed_(0),
@@ -981,6 +892,26 @@
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
 
+  virtual void AddAllocationObserver(AllocationObserver* observer) {
+    allocation_observers_->Add(observer);
+  }
+
+  virtual void RemoveAllocationObserver(AllocationObserver* observer) {
+    bool removed = allocation_observers_->RemoveElement(observer);
+    USE(removed);
+    DCHECK(removed);
+  }
+
+  virtual void PauseAllocationObservers() {
+    allocation_observers_paused_ = true;
+  }
+
+  virtual void ResumeAllocationObservers() {
+    allocation_observers_paused_ = false;
+  }
+
+  void AllocationStep(Address soon_object, int size);
+
   // Return the total amount committed memory for this space, i.e., allocatable
   // memory and page headers.
   virtual intptr_t CommittedMemory() { return committed_; }
@@ -1027,6 +958,9 @@
     DCHECK_GE(committed_, 0);
   }
 
+  v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
+  bool allocation_observers_paused_;
+
  private:
   Heap* heap_;
   AllocationSpace id_;
@@ -1628,12 +1562,12 @@
 // A free list category maintains a linked list of free memory blocks.
 class FreeListCategory {
  public:
-  explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
-      : type_(type),
-        top_(nullptr),
-        end_(nullptr),
-        available_(0),
-        owner_(owner) {}
+  FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
+
+  void Initialize(FreeList* owner, FreeListCategoryType type) {
+    owner_ = owner;
+    type_ = type;
+  }
 
   // Concatenates {category} into {this}.
   //
@@ -1763,8 +1697,11 @@
 
   // Return the number of bytes available on the free list.
   intptr_t Available() {
-    return small_list_.available() + medium_list_.available() +
-           large_list_.available() + huge_list_.available();
+    intptr_t available = 0;
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      available += category_[i].available();
+    }
+    return available;
   }
 
   // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
@@ -1776,8 +1713,10 @@
   MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
 
   bool IsEmpty() {
-    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
-           large_list_.IsEmpty() && huge_list_.IsEmpty();
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      if (!category_[i].IsEmpty()) return false;
+    }
+    return true;
   }
 
   // Used after booting the VM.
@@ -1813,29 +1752,36 @@
   FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
 
   FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
-    switch (category) {
-      case kSmall:
-        return &small_list_;
-      case kMedium:
-        return &medium_list_;
-      case kLarge:
-        return &large_list_;
-      case kHuge:
-        return &huge_list_;
-      default:
-        UNREACHABLE();
+    return &category_[category];
+  }
+
+  FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+    if (size_in_bytes <= kSmallListMax) {
+      return kSmall;
+    } else if (size_in_bytes <= kMediumListMax) {
+      return kMedium;
+    } else if (size_in_bytes <= kLargeListMax) {
+      return kLarge;
     }
-    UNREACHABLE();
-    return nullptr;
+    return kHuge;
+  }
+
+  FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+      size_t size_in_bytes) {
+    if (size_in_bytes <= kSmallAllocationMax) {
+      return kSmall;
+    } else if (size_in_bytes <= kMediumAllocationMax) {
+      return kMedium;
+    } else if (size_in_bytes <= kLargeAllocationMax) {
+      return kLarge;
+    }
+    return kHuge;
   }
 
   PagedSpace* owner_;
   base::Mutex mutex_;
   intptr_t wasted_bytes_;
-  FreeListCategory small_list_;
-  FreeListCategory medium_list_;
-  FreeListCategory large_list_;
-  FreeListCategory huge_list_;
+  FreeListCategory category_[kNumberOfCategories];
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
@@ -1959,10 +1905,8 @@
 
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
-  inline bool Contains(HeapObject* o);
-  // Unlike Contains() methods it is safe to call this one even for addresses
-  // of unmapped memory.
-  bool ContainsSafe(Address addr);
+  inline bool Contains(Object* o);
+  bool ContainsSlow(Address addr);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or a Smi if it is not.  The implementation iterates over
@@ -2085,7 +2029,7 @@
   void IncreaseCapacity(int size);
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  void ReleasePage(Page* page, bool evict_free_list_items);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -2112,23 +2056,12 @@
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
 
-  // Evacuation candidates are swept by evacuator.  Needs to return a valid
-  // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptBySweeperThreads(Page* p) {
-    return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
-  }
-
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
   // for the sweeper threads to finish sweeping.
   // It returns true when sweeping is completed and false otherwise.
   bool EnsureSweeperProgress(intptr_t size_in_bytes);
 
-  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
-
-  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
-
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
@@ -2148,9 +2081,6 @@
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);
 
-  void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
-                                  intptr_t limit = kCompactionMemoryWanted);
-
   // Refills the free list from the corresponding free list filled by the
   // sweeper.
   virtual void RefillFreeList();
@@ -2158,8 +2088,6 @@
  protected:
   void AddMemory(Address start, intptr_t size);
 
-  FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
-
   void MoveOverFreeMemory(PagedSpace* other);
 
   // PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2212,11 +2140,6 @@
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // The sweeper threads iterate over the list of pointer and data space pages
-  // and sweep these pages concurrently. They will stop sweeping after the
-  // end_of_unswept_pages_ page.
-  Page* end_of_unswept_pages_;
-
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
@@ -2266,17 +2189,13 @@
 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
 
 
-class SemiSpace;
-
-
 class NewSpacePage : public MemoryChunk {
  public:
   // GC related flags copied from from-space to to-space when
   // flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
       (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
   static const int kAreaSize = Page::kAllocatableMemory;
 
@@ -2349,31 +2268,39 @@
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector  uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
-
+// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
+// The mark-compact collector uses the memory of the first page in the from
+// space as a marking stack when tracing live objects.
 class SemiSpace : public Space {
  public:
-  // Constructor.
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
   SemiSpace(Heap* heap, SemiSpaceId semispace)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-        start_(NULL),
-        age_mark_(NULL),
+        current_capacity_(0),
+        maximum_capacity_(0),
+        minimum_capacity_(0),
+        start_(nullptr),
+        age_mark_(nullptr),
+        committed_(false),
         id_(semispace),
         anchor_(this),
-        current_page_(NULL) {}
+        current_page_(nullptr) {}
 
-  // Sets up the semispace using the given chunk.
-  void SetUp(Address start, int initial_capacity, int target_capacity,
-             int maximum_capacity);
+  inline bool Contains(HeapObject* o);
+  inline bool Contains(Object* o);
+  inline bool ContainsSlow(Address a);
+
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS.
+  void SetUp(Address start, int initial_capacity, int maximum_capacity);
 
   // Tear down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != NULL; }
+  bool HasBeenSetUp() { return start_ != nullptr; }
 
   // Grow the semispace to the new capacity.  The new capacity
   // requested must be larger than the current capacity and less than
@@ -2385,12 +2312,9 @@
   // semispace and less than the current capacity.
   bool ShrinkTo(int new_capacity);
 
-  // Sets the total capacity. Only possible when the space is not committed.
-  bool SetTotalCapacity(int new_capacity);
-
   // Returns the start address of the first page of the space.
   Address space_start() {
-    DCHECK(anchor_.next_page() != &anchor_);
+    DCHECK_NE(anchor_.next_page(), &anchor_);
     return anchor_.next_page()->area_start();
   }
 
@@ -2417,18 +2341,26 @@
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
 
-  // True if the address is in the address range of this semispace (not
-  // necessarily below the allocation pointer).
-  bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
-           reinterpret_cast<uintptr_t>(start_);
-  }
+  bool is_committed() { return committed_; }
+  bool Commit();
+  bool Uncommit();
 
-  // True if the object is a heap object in the address range of this
-  // semispace (not necessarily below the allocation pointer).
-  bool Contains(Object* o) {
-    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
-  }
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
+  // Returns the current total capacity of the semispace.
+  int current_capacity() { return current_capacity_; }
+
+  // Returns the maximum total capacity of the semispace.
+  int maximum_capacity() { return maximum_capacity_; }
+
+  // Returns the initial capacity of the semispace.
+  int minimum_capacity() { return minimum_capacity_; }
+
+  SemiSpaceId id() { return id_; }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory() override;
 
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called:
@@ -2445,18 +2377,6 @@
     return 0;
   }
 
-
-  bool is_committed() { return committed_; }
-  bool Commit();
-  bool Uncommit();
-
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
-#ifdef VERIFY_HEAP
-  virtual void Verify();
-#endif
-
 #ifdef DEBUG
   void Print() override;
   // Validate a range of addresses in a SemiSpace.
@@ -2468,51 +2388,34 @@
   inline static void AssertValidRange(Address from, Address to) {}
 #endif
 
-  // Returns the current total capacity of the semispace.
-  int TotalCapacity() { return total_capacity_; }
-
-  // Returns the target for total capacity of the semispace.
-  int TargetCapacity() { return target_capacity_; }
-
-  // Returns the maximum total capacity of the semispace.
-  int MaximumTotalCapacity() { return maximum_total_capacity_; }
-
-  // Returns the initial capacity of the semispace.
-  int InitialTotalCapacity() { return initial_total_capacity_; }
-
-  SemiSpaceId id() { return id_; }
-
-  static void Swap(SemiSpace* from, SemiSpace* to);
-
-  // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory() override;
+#ifdef VERIFY_HEAP
+  virtual void Verify();
+#endif
 
  private:
-  // Flips the semispace between being from-space and to-space.
-  // Copies the flags into the masked positions on all pages in the space.
-  void FlipPages(intptr_t flags, intptr_t flag_mask);
-
-  // Updates Capacity and MaximumCommitted based on new capacity.
-  void SetCapacity(int new_capacity);
-
   NewSpacePage* anchor() { return &anchor_; }
 
-  // The current and maximum total capacity of the space.
-  int total_capacity_;
-  int target_capacity_;
-  int maximum_total_capacity_;
-  int initial_total_capacity_;
+  void set_current_capacity(int new_capacity) {
+    current_capacity_ = new_capacity;
+  }
+
+  // Copies the flags into the masked positions on all pages in the space.
+  void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+
+  // The currently committed space capacity.
+  int current_capacity_;
+
+  // The maximum capacity that can be used by this space.
+  int maximum_capacity_;
+
+  // The minimum capacity for the space. A space cannot shrink below this size.
+  int minimum_capacity_;
 
   // The start address of the space.
   Address start_;
   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;
 
-  // Masks and comparison values to test for containment in this semispace.
-  uintptr_t address_mask_;
-  uintptr_t object_mask_;
-  uintptr_t object_expected_;
-
   bool committed_;
   SemiSpaceId id_;
 
@@ -2576,54 +2479,6 @@
   NewSpacePage* last_page_;
 };
 
-// -----------------------------------------------------------------------------
-// Allows observation of inline allocation in the new space.
-class InlineAllocationObserver {
- public:
-  explicit InlineAllocationObserver(intptr_t step_size)
-      : step_size_(step_size), bytes_to_next_step_(step_size) {
-    DCHECK(step_size >= kPointerSize);
-  }
-  virtual ~InlineAllocationObserver() {}
-
- private:
-  intptr_t step_size() const { return step_size_; }
-  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
-
-  // Pure virtual method provided by the subclasses that gets called when at
-  // least step_size bytes have been allocated. soon_object is the address just
-  // allocated (but not yet initialized.) size is the size of the object as
-  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
-  // of:
-  // 1) soon_object will be nullptr in cases where we end up observing an
-  //    allocation that happens to be a filler space (e.g. page boundaries.)
-  // 2) size is the requested size at the time of allocation. Right-trimming
-  //    may change the object size dynamically.
-  // 3) soon_object may actually be the first object in an allocation-folding
-  //    group. In such a case size is the size of the group rather than the
-  //    first object.
-  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
-
-  // Called each time the new space does an inline allocation step. This may be
-  // more frequently than the step_size we are monitoring (e.g. when there are
-  // multiple observers, or when page or space boundary is encountered.)
-  void InlineAllocationStep(int bytes_allocated, Address soon_object,
-                            size_t size) {
-    bytes_to_next_step_ -= bytes_allocated;
-    if (bytes_to_next_step_ <= 0) {
-      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
-           size);
-      bytes_to_next_step_ = step_size_;
-    }
-  }
-
-  intptr_t step_size_;
-  intptr_t bytes_to_next_step_;
-
-  friend class NewSpace;
-
-  DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
-};
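
The observer mechanism itself is not dropped here: it is generalized into the
AllocationObserver accepted by the AddAllocationObserver()/
RemoveAllocationObserver() overrides that appear on NewSpace further down, with
pause/resume moving behind virtual hooks. For reference, the step accounting
the deleted class implemented (quoted from the removed lines above; the new
class is declared elsewhere in this patch and is assumed to keep the same
contract):

    void InlineAllocationStep(int bytes_allocated, Address soon_object,
                              size_t size) {
      bytes_to_next_step_ -= bytes_allocated;
      if (bytes_to_next_step_ <= 0) {
        // Fires roughly every step_size_ bytes; an oversized allocation
        // triggers at most one step.
        Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
             size);
        bytes_to_next_step_ = step_size_;
      }
    }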
 
 // -----------------------------------------------------------------------------
 // The young generation space.
@@ -2639,8 +2494,11 @@
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        top_on_previous_step_(0),
-        inline_allocation_observers_paused_(false) {}
+        top_on_previous_step_(0) {}
+
+  inline bool Contains(HeapObject* o);
+  inline bool ContainsSlow(Address a);
+  inline bool Contains(Object* o);
 
   // Sets up the new space using the given chunk.
   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2661,24 +2519,9 @@
   // their maximum capacity.
   void Grow();
 
-  // Grow the capacity of the semispaces by one page.
-  bool GrowOnePage();
-
   // Shrink the capacity of the semispaces.
   void Shrink();
 
-  // True if the address or object lies in the address range of either
-  // semispace (not necessarily below the allocation pointer).
-  bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
-           reinterpret_cast<uintptr_t>(start_);
-  }
-
-  bool Contains(Object* o) {
-    Address a = reinterpret_cast<Address>(o);
-    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
-  }
-
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
     return pages_used_ * NewSpacePage::kAreaSize +
@@ -2692,16 +2535,16 @@
 
   // Return the allocatable capacity of a semispace.
   intptr_t Capacity() {
-    SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
-    return (to_space_.TotalCapacity() / Page::kPageSize) *
+    SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+    return (to_space_.current_capacity() / Page::kPageSize) *
            NewSpacePage::kAreaSize;
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
   // memory.
   intptr_t TotalCapacity() {
-    DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
-    return to_space_.TotalCapacity();
+    DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+    return to_space_.current_capacity();
   }
 
   // Committed memory for NewSpace is the committed memory of both semi-spaces
@@ -2742,18 +2585,16 @@
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
-    DCHECK(to_space_.MaximumTotalCapacity() ==
-           from_space_.MaximumTotalCapacity());
-    return to_space_.MaximumTotalCapacity();
+    DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
+    return to_space_.maximum_capacity();
   }
 
   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
 
   // Returns the initial capacity of a semispace.
   int InitialTotalCapacity() {
-    DCHECK(to_space_.InitialTotalCapacity() ==
-           from_space_.InitialTotalCapacity());
-    return to_space_.InitialTotalCapacity();
+    DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
+    return to_space_.minimum_capacity();
   }
 
   // Return the address of the allocation pointer in the active semispace.
@@ -2779,18 +2620,6 @@
   // The start address of the space and a bit mask. Anding an address in the
   // new space with the mask will result in the start address.
   Address start() { return start_; }
-  uintptr_t mask() { return address_mask_; }
-
-  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
-    DCHECK(Contains(addr));
-    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
-           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
-    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
-  }
-
-  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
-    return reinterpret_cast<Address>(index << kPointerSizeLog2);
-  }
 
   // The allocation top and limit address.
   Address* allocation_top_address() { return allocation_info_.top_address(); }
@@ -2815,22 +2644,26 @@
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
 
+  // When inline allocation stepping is active, either because of incremental
+  // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+  // inline allocation every once in a while. This is done by setting
+  // allocation_info_.limit to be lower than the actual limit and increasing
+  // it in steps to guarantee that the observers are notified periodically.
   void UpdateInlineAllocationLimit(int size_in_bytes);
 
-  // Allows observation of inline allocation. The observer->Step() method gets
-  // called after every step_size bytes have been allocated (approximately).
-  // This works by adjusting the allocation limit to a lower value and adjusting
-  // it after each step.
-  void AddInlineAllocationObserver(InlineAllocationObserver* observer);
-
-  // Removes a previously installed observer.
-  void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
-
   void DisableInlineAllocationSteps() {
     top_on_previous_step_ = 0;
     UpdateInlineAllocationLimit(0);
   }
 
+  // Allows observation of inline allocation. The observer->Step() method gets
+  // called after every step_size bytes have been allocated (approximately).
+  // This works by adjusting the allocation limit to a lower value and adjusting
+  // it after each step.
+  void AddAllocationObserver(AllocationObserver* observer) override;
+
+  void RemoveAllocationObserver(AllocationObserver* observer) override;
+
   // Get the extent of the inactive semispace (for use as a marking stack,
   // or to zap it). Notice: space-addresses are not necessarily on the
   // same page, so FromSpaceStart() might be above FromSpaceEnd().
@@ -2843,18 +2676,10 @@
   Address ToSpaceStart() { return to_space_.space_start(); }
   Address ToSpaceEnd() { return to_space_.space_end(); }
 
-  inline bool ToSpaceContains(Address address) {
-    return to_space_.Contains(address);
-  }
-  inline bool FromSpaceContains(Address address) {
-    return from_space_.Contains(address);
-  }
-
-  // True if the object is a heap object in the address range of the
-  // respective semispace (not necessarily below the allocation pointer of the
-  // semispace).
-  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+  inline bool ToSpaceContainsSlow(Address a);
+  inline bool FromSpaceContainsSlow(Address a);
+  inline bool ToSpaceContains(Object* o);
+  inline bool FromSpaceContains(Object* o);
 
   // Try to switch the active semispace to a new, empty, page.
   // Returns false if this isn't possible or reasonable (i.e., there
@@ -2901,6 +2726,9 @@
 
   SemiSpace* active_space() { return &to_space_; }
 
+  void PauseAllocationObservers() override;
+  void ResumeAllocationObservers() override;
+
  private:
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
@@ -2918,22 +2746,12 @@
 
   // Start address and bit mask for containment testing.
   Address start_;
-  uintptr_t address_mask_;
-  uintptr_t object_mask_;
-  uintptr_t object_expected_;
 
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
 
-  // When inline allocation stepping is active, either because of incremental
-  // marking or because of idle scavenge, we 'interrupt' inline allocation every
-  // once in a while. This is done by setting allocation_info_.limit to be lower
-  // than the actual limit and and increasing it in steps to guarantee that the
-  // observers are notified periodically.
-  List<InlineAllocationObserver*> inline_allocation_observers_;
   Address top_on_previous_step_;
-  bool inline_allocation_observers_paused_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
@@ -2950,26 +2768,18 @@
                             size_t size);
   intptr_t GetNextInlineAllocationStepSize();
   void StartNextInlineAllocationStep();
-  void PauseInlineAllocationObservers();
-  void ResumeInlineAllocationObservers();
 
-  friend class PauseInlineAllocationObserversScope;
   friend class SemiSpaceIterator;
 };
 
-class PauseInlineAllocationObserversScope {
+class PauseAllocationObserversScope {
  public:
-  explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
-      : new_space_(new_space) {
-    new_space_->PauseInlineAllocationObservers();
-  }
-  ~PauseInlineAllocationObserversScope() {
-    new_space_->ResumeInlineAllocationObservers();
-  }
+  explicit PauseAllocationObserversScope(Heap* heap);
+  ~PauseAllocationObserversScope();
 
  private:
-  NewSpace* new_space_;
-  DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+  Heap* heap_;
+  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
 };
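
The scope is now heap-wide rather than new-space-specific, matching the virtual
PauseAllocationObservers()/ResumeAllocationObservers() hooks added above; its
constructor body lives in the .cc and is not shown in this diff. A usage
sketch, assuming a Heap* in scope:

    {
      PauseAllocationObserversScope pause_observers(heap);
      // Work that must not fire observer steps, e.g. evacuation or
      // ResetAllocationInfo(), goes here.
    }  // Observers resume when the scope unwinds.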
 
 // -----------------------------------------------------------------------------
@@ -2980,12 +2790,6 @@
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}
 
-  // Adds external memory starting at {start} of {size_in_bytes} to the space.
-  void AddExternalMemory(Address start, int size_in_bytes) {
-    IncreaseCapacity(size_in_bytes);
-    Free(start, size_in_bytes);
-  }
-
   bool is_local() override { return true; }
 
   void RefillFreeList() override;
@@ -3004,9 +2808,7 @@
  public:
   explicit CompactionSpaceCollection(Heap* heap)
       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
-        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
-        duration_(0.0),
-        bytes_compacted_(0) {}
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
 
   CompactionSpace* Get(AllocationSpace space) {
     switch (space) {
@@ -3021,21 +2823,9 @@
     return nullptr;
   }
 
-  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
-    duration_ += duration;
-    bytes_compacted_ += bytes_compacted;
-  }
-
-  double duration() const { return duration_; }
-  intptr_t bytes_compacted() const { return bytes_compacted_; }
-
  private:
   CompactionSpace old_space_;
   CompactionSpace code_space_;
-
-  // Book keeping.
-  double duration_;
-  intptr_t bytes_compacted_;
 };
 
 
@@ -3153,7 +2943,9 @@
 
   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject* obj);
-  bool Contains(Address address);
+  // Checks whether an address is in the object area in this space. Iterates
+  // over all objects in the space. May be slow.
+  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
 
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
@@ -3169,9 +2961,6 @@
   void ReportStatistics();
   void CollectCodeStatistics();
 #endif
-  // Checks whether an address is in the object area in this space.  It
-  // iterates all objects in the space. May be slow.
-  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
 
  private:
   // The head of the linked list of large object chunks.
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
index e11ad87..920ec34 100644
--- a/src/heap/store-buffer-inl.h
+++ b/src/heap/store-buffer-inl.h
@@ -6,48 +6,30 @@
 #define V8_STORE_BUFFER_INL_H_
 
 #include "src/heap/heap.h"
+#include "src/heap/remembered-set.h"
 #include "src/heap/spaces-inl.h"
 #include "src/heap/store-buffer.h"
 
 namespace v8 {
 namespace internal {
 
-void StoreBuffer::Mark(Address addr) {
-  DCHECK(!heap_->code_space()->Contains(addr));
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  *top++ = addr;
-  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(top));
-  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
-    DCHECK(top == limit_);
-    Compact();
-  } else {
-    DCHECK(top < limit_);
-  }
+void LocalStoreBuffer::Record(Address addr) {
+  if (top_->is_full()) top_ = new Node(top_);
+  top_->buffer[top_->count++] = addr;
 }
 
-
-inline void StoreBuffer::MarkSynchronized(Address addr) {
-  base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  Mark(addr);
-}
-
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
-  if (store_buffer_rebuilding_enabled_) {
-    SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
-                !heap_->new_space()->Contains(addr));
-    Address* top = old_top_;
-    *top++ = addr;
-    old_top_ = top;
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    if (top >= old_limit_) {
-      DCHECK(callback_ != NULL);
-      (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
-                   kStoreBufferFullEvent);
+void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
+  Node* current = top_;
+  while (current != nullptr) {
+    for (int i = 0; i < current->count; i++) {
+      Address slot = current->buffer[i];
+      Page* page = Page::FromAnyPointerAddress(heap_, slot);
+      RememberedSet<OLD_TO_NEW>::Insert(page, slot);
     }
+    current = current->next;
   }
 }
+
 }  // namespace internal
 }  // namespace v8
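
Taken together, Record() and Process() give each evacuation thread a private,
lock-free log of old-to-new slots that is published into the OLD_TO_NEW
remembered set in one pass at the end. Note that Process() never reads its
store_buffer argument; the parameter appears to exist only for call-site
uniformity. A usage sketch (the actual call sites are elsewhere in this patch;
slot_address is illustrative):

    LocalStoreBuffer local_slots(heap);
    // Hot path, once per recorded old-to-new pointer; O(1), no locking:
    local_slots.Record(slot_address);
    // After the thread finishes its work, publish everything at once:
    local_slots.Process(heap->store_buffer());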
 
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index a8a1e5b..21f375b 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -17,24 +17,7 @@
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap),
-      start_(NULL),
-      limit_(NULL),
-      old_start_(NULL),
-      old_limit_(NULL),
-      old_top_(NULL),
-      old_reserved_limit_(NULL),
-      old_buffer_is_sorted_(false),
-      old_buffer_is_filtered_(false),
-      during_gc_(false),
-      store_buffer_rebuilding_enabled_(false),
-      callback_(NULL),
-      may_move_store_buffer_entries_(true),
-      virtual_memory_(NULL),
-      hash_set_1_(NULL),
-      hash_set_2_(NULL),
-      hash_sets_are_empty_(true) {}
-
+    : heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
 
 void StoreBuffer::SetUp() {
   // Allocate 3x the buffer size, so that we can start the new store buffer
@@ -47,31 +30,6 @@
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
-  // Reserve space for the larger old buffer.
-  old_virtual_memory_ =
-      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
-  old_top_ = old_start_ =
-      reinterpret_cast<Address*>(old_virtual_memory_->address());
-  // Don't know the alignment requirements of the OS, but it is certainly not
-  // less than 0xfff.
-  CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
-  // Initial size of the old buffer is as big as the buffer for new pointers.
-  // This means even if we later fail to enlarge the old buffer due to OOM from
-  // the OS, we will still be able to empty the new pointer buffer into the old
-  // buffer.
-  int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
-  CHECK(initial_length > 0);
-  CHECK(initial_length <= kOldStoreBufferLength);
-  old_limit_ = old_start_ + initial_length;
-  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
-
-  if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
-                                   (old_limit_ - old_start_) * kPointerSize,
-                                   false)) {
-    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
-  }
-
   DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
@@ -90,533 +48,31 @@
     V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
   heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
-
-  hash_set_1_ = new uintptr_t[kHashSetLength];
-  hash_set_2_ = new uintptr_t[kHashSetLength];
-  hash_sets_are_empty_ = false;
-
-  ClearFilteringHashSets();
 }
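
The surviving RoundUp to 2 * kStoreBufferSize is what keeps overflow detection
down to a single bit test: kStoreBufferSize equals kStoreBufferOverflowBit (see
store-buffer.h below), so every top pointer strictly inside the buffer has that
bit clear, and the first pointer past the last slot, i.e. limit_, has it set.
This is the check the deleted StoreBuffer::Mark performed and that generated
code still relies on; in sketch form:

    Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
    if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
      DCHECK(top == limit_);  // the buffer is full: drain it
      StoreBuffer::StoreBufferOverflow(heap_->isolate());
    }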
 
 
 void StoreBuffer::TearDown() {
   delete virtual_memory_;
-  delete old_virtual_memory_;
-  delete[] hash_set_1_;
-  delete[] hash_set_2_;
-  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
 }
 
 
 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
-  isolate->heap()->store_buffer()->Compact();
+  isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
   isolate->counters()->store_buffer_overflows()->Increment();
 }
 
-
-bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
-  return old_limit_ - old_top_ >= space_needed;
-}
-
-
-void StoreBuffer::EnsureSpace(intptr_t space_needed) {
-  while (old_limit_ - old_top_ < space_needed &&
-         old_limit_ < old_reserved_limit_) {
-    size_t grow = old_limit_ - old_start_;  // Double size.
-    if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
-                                    grow * kPointerSize, false)) {
-      old_limit_ += grow;
-    } else {
-      break;
-    }
-  }
-
-  if (SpaceAvailable(space_needed)) return;
-
-  if (old_buffer_is_filtered_) return;
-  DCHECK(may_move_store_buffer_entries_);
-  Compact();
-
-  old_buffer_is_filtered_ = true;
-  bool page_has_scan_on_scavenge_flag = false;
-
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) {
-      page_has_scan_on_scavenge_flag = true;
-      break;
-    }
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  if (SpaceAvailable(space_needed)) return;
-
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] = {
-        {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
-        {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
-        {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
-        {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
-        {1, 0}};
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (SpaceAvailable(space_needed)) return;
-  }
-  UNREACHABLE();
-}
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    chunk->set_store_buffer_counter(0);
-  }
-  bool created_new_scan_on_scavenge_pages = false;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
-    }
-    int old_counter = containing_chunk->store_buffer_counter();
-    if (old_counter >= threshold) {
-      containing_chunk->set_scan_on_scavenge(true);
-      created_new_scan_on_scavenge_pages = true;
-    }
-    containing_chunk->set_store_buffer_counter(old_counter + 1);
-    previous_chunk = containing_chunk;
-  }
-  if (created_new_scan_on_scavenge_pages) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-    heap_->isolate()->CountUsage(
-        v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
-  }
-  old_buffer_is_filtered_ = true;
-}
-
-
-void StoreBuffer::Filter(int flag) {
-  Address* new_top = old_start_;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p++) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
-      previous_chunk = containing_chunk;
-    }
-    if (!containing_chunk->IsFlagSet(flag)) {
-      *new_top++ = addr;
-    }
-  }
-  old_top_ = new_top;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
-  Compact();
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  bool page_has_scan_on_scavenge_flag = false;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) {
-      page_has_scan_on_scavenge_flag = true;
-      break;
-    }
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  // Filtering hash sets are inconsistent with the store buffer after
-  // iteration.
-  ClearFilteringHashSets();
-
-  return page_has_scan_on_scavenge_flag;
-}
-
-
-void StoreBuffer::ClearFilteringHashSets() {
-  if (!hash_sets_are_empty_) {
-    memset(reinterpret_cast<void*>(hash_set_1_), 0,
-           sizeof(uintptr_t) * kHashSetLength);
-    memset(reinterpret_cast<void*>(hash_set_2_), 0,
-           sizeof(uintptr_t) * kHashSetLength);
-    hash_sets_are_empty_ = true;
-  }
-}
-
-
-void StoreBuffer::GCPrologue() {
-  ClearFilteringHashSets();
-  during_gc_ = true;
-}
-
-
-#ifdef VERIFY_HEAP
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        Object* object = *slot;
-        heap_->InNewSpace(object);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef VERIFY_HEAP
-  VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
-  during_gc_ = false;
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-}
-
-
-void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
-                                      ObjectSlotCallback slot_callback) {
-  Object** slot = reinterpret_cast<Object**>(slot_address);
-  Object* object = *slot;
-
-  // If the object is not in from space, it must be a duplicate store buffer
-  // entry and the slot was already updated.
-  if (heap_->InFromSpace(object)) {
-    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-    DCHECK(heap_object->IsHeapObject());
-    slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-    object = *slot;
-    // If the object was in from space before and is after executing the
-    // callback in to space, the object is still live.
-    // Unfortunately, we do not know about the slot. It could be in a
-    // just freed free space object.
-    if (heap_->InToSpace(object)) {
-      EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
-    }
-  }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start, Address end, ObjectSlotCallback slot_callback) {
-  for (Address slot_address = start; slot_address < end;
-       slot_address += kPointerSize) {
-    ProcessOldToNewSlot(slot_address, slot_callback);
-  }
-}
-
-
-void StoreBuffer::IteratePointersInStoreBuffer(
-    ObjectSlotCallback slot_callback) {
-  Address* limit = old_top_;
-  old_top_ = old_start_;
-  {
-    DontMoveStoreBufferEntriesScope scope(this);
-    for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
-      Address* saved_top = old_top_;
-#endif
-      ProcessOldToNewSlot(*current, slot_callback);
-      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
-    }
-  }
-}
-
-
-void StoreBuffer::ClearInvalidStoreBufferEntries() {
-  Compact();
-  Address* new_top = old_start_;
-  for (Address* current = old_start_; current < old_top_; current++) {
-    Address addr = *current;
-    Object** slot = reinterpret_cast<Object**>(addr);
-    Object* object = *slot;
-    if (heap_->InNewSpace(object) && object->IsHeapObject()) {
-      // If the target object is not black, the source slot must be part
-      // of a non-black (dead) object.
-      HeapObject* heap_object = HeapObject::cast(object);
-      if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
-          heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
-        *new_top++ = addr;
-      }
-    }
-  }
-  old_top_ = new_top;
-  ClearFilteringHashSets();
-
-  // Don't scan on scavenge dead large objects.
-  LargeObjectIterator it(heap_->lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-    if (chunk->scan_on_scavenge() &&
-        Marking::IsWhite(Marking::MarkBitFrom(object))) {
-      chunk->set_scan_on_scavenge(false);
-    }
-  }
-}
-
-
-void StoreBuffer::VerifyValidStoreBufferEntries() {
-  for (Address* current = old_start_; current < old_top_; current++) {
-    Object** slot = reinterpret_cast<Object**>(*current);
-    Object* object = *slot;
-    CHECK(object->IsHeapObject());
-    CHECK(heap_->InNewSpace(object));
-    heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
-        reinterpret_cast<Address>(slot), HeapObject::cast(object));
-  }
-}
-
-
-class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
- public:
-  FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
-                                ObjectSlotCallback callback)
-      : store_buffer_(store_buffer), callback_(callback) {}
-
-  V8_INLINE void VisitPointers(Object** start, Object** end) override {
-    store_buffer_->FindPointersToNewSpaceInRegion(
-        reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
-        callback_);
-  }
-
-  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
-
- private:
-  StoreBuffer* store_buffer_;
-  ObjectSlotCallback callback_;
-};
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
-  // We do not sort or remove duplicated entries from the store buffer because
-  // we expect that callback will rebuild the store buffer thus removing
-  // all duplicates and pointers to old space.
-  bool some_pages_to_scan = PrepareForIteration();
-
-  // TODO(gc): we want to skip slots on evacuation candidates
-  // but we can't simply figure that out from slot address
-  // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback);
-
-  // We are done scanning all the pointers that were in the store buffer, but
-  // there may be some pages marked scan_on_scavenge that have pointers to new
-  // space that are not in the store buffer.  We must scan them now.  As we
-  // scan, the surviving pointers to new space will be added to the store
-  // buffer.  If there are still a lot of pointers to new space then we will
-  // keep the scan_on_scavenge flag on the page and discard the pointers that
-  // were added to the store buffer.  If there are not many pointers to new
-  // space left on the page we will keep the pointers in the store buffer and
-  // remove the flag from the page.
-  if (some_pages_to_scan) {
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
-    }
-    PointerChunkIterator it(heap_);
-    MemoryChunk* chunk;
-    FindPointersToNewSpaceVisitor visitor(this, slot_callback);
-    while ((chunk = it.next()) != NULL) {
-      if (chunk->scan_on_scavenge()) {
-        chunk->set_scan_on_scavenge(false);
-        if (callback_ != NULL) {
-          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
-        }
-        if (chunk->owner() == heap_->lo_space()) {
-          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
-          HeapObject* array = large_page->GetObject();
-          DCHECK(array->IsFixedArray());
-          Address start = array->address();
-          Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback);
-        } else {
-          Page* page = reinterpret_cast<Page*>(chunk);
-          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          if (owner == heap_->map_space()) {
-            DCHECK(page->WasSwept());
-            HeapObjectIterator iterator(page);
-            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
-                 heap_object = iterator.Next()) {
-              // We skip free space objects.
-              if (!heap_object->IsFiller()) {
-                DCHECK(heap_object->IsMap());
-                FindPointersToNewSpaceInRegion(
-                    heap_object->address() + Map::kPointerFieldsBeginOffset,
-                    heap_object->address() + Map::kPointerFieldsEndOffset,
-                    slot_callback);
-              }
-            }
-          } else {
-            if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
-              // Aborted pages require iterating using mark bits because they
-              // don't have an iterable object layout before sweeping (which can
-              // only happen later). Note that we can never reach an
-              // aborted page through the scavenger.
-              DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
-              heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
-                                                                    &visitor);
-            } else {
-              heap_->mark_compact_collector()
-                  ->SweepOrWaitUntilSweepingCompleted(page);
-              HeapObjectIterator iterator(page);
-              for (HeapObject* heap_object = iterator.Next();
-                   heap_object != nullptr; heap_object = iterator.Next()) {
-                // We iterate over objects that contain new space pointers only.
-                heap_object->IterateBody(&visitor);
-              }
-            }
-          }
-        }
-      }
-    }
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
-    }
-  }
-}
-
-
-void StoreBuffer::Compact() {
+void StoreBuffer::MoveEntriesToRememberedSet() {
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
   if (top == start_) return;
-
-  // There's no check of the limit in the loop below so we check here for
-  // the worst case (compaction doesn't eliminate any pointers).
   DCHECK(top <= limit_);
   heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
-  EnsureSpace(top - start_);
-  DCHECK(may_move_store_buffer_entries_);
-  // Goes through the addresses in the store buffer attempting to remove
-  // duplicates.  In the interest of speed this is a lossy operation.  Some
-  // duplicates will remain.  We have two hash sets with different hash
-  // functions to reduce the number of unnecessary clashes.
-  hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
     DCHECK(!heap_->code_space()->Contains(*current));
-    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
-    // Shift out the last bits including any tags.
-    int_addr >>= kPointerSizeLog2;
-    // The upper part of an address is basically random because of ASLR and OS
-    // non-determinism, so we use only the bits within a page for hashing to
-    // make v8's behavior (more) deterministic.
-    uintptr_t hash_addr =
-        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
-    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
-                 (kHashSetLength - 1));
-    if (hash_set_1_[hash1] == int_addr) continue;
-    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
-    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
-    hash2 &= (kHashSetLength - 1);
-    if (hash_set_2_[hash2] == int_addr) continue;
-    if (hash_set_1_[hash1] == 0) {
-      hash_set_1_[hash1] = int_addr;
-    } else if (hash_set_2_[hash2] == 0) {
-      hash_set_2_[hash2] = int_addr;
-    } else {
-      // Rather than slowing down we just throw away some entries.  This will
-      // cause some duplicates to remain undetected.
-      hash_set_1_[hash1] = int_addr;
-      hash_set_2_[hash2] = 0;
-    }
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
-    DCHECK(old_top_ <= old_limit_);
-  }
-  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
-  if (event == kStoreBufferStartScanningPagesEvent) {
-    start_of_current_page_ = NULL;
-    current_page_ = NULL;
-  } else if (event == kStoreBufferScanningPageEvent) {
-    if (current_page_ != NULL) {
-      // If this page already overflowed the store buffer during this iteration.
-      if (current_page_->scan_on_scavenge()) {
-        // Then we should wipe out the entries that have been added for it.
-        store_buffer_->SetTop(start_of_current_page_);
-      } else if (store_buffer_->Top() - start_of_current_page_ >=
-                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
-        // Did we find too many pointers in the previous page?  The heuristic is
-        // that no page can take more then 1/5 the remaining slots in the store
-        // buffer.
-        current_page_->set_scan_on_scavenge(true);
-        store_buffer_->SetTop(start_of_current_page_);
-      } else {
-        // In this case the page we scanned took a reasonable number of slots in
-        // the store buffer.  It has now been rehabilitated and is no longer
-        // marked scan_on_scavenge.
-        DCHECK(!current_page_->scan_on_scavenge());
-      }
-    }
-    start_of_current_page_ = store_buffer_->Top();
-    current_page_ = page;
-  } else if (event == kStoreBufferFullEvent) {
-    // The current page overflowed the store buffer again.  Wipe out its entries
-    // in the store buffer and mark it scan-on-scavenge again.  This may happen
-    // several times while scanning.
-    if (current_page_ == NULL) {
-      // Store Buffer overflowed while scanning promoted objects.  These are not
-      // in any particular page, though they are likely to be clustered by the
-      // allocation routines.
-      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
-    } else {
-      // Store Buffer overflowed while scanning a particular old space page for
-      // pointers to new space.
-      DCHECK(current_page_ == page);
-      DCHECK(page != NULL);
-      current_page_->set_scan_on_scavenge(true);
-      DCHECK(start_of_current_page_ != store_buffer_->Top());
-      store_buffer_->SetTop(start_of_current_page_);
-    }
-  } else {
-    UNREACHABLE();
+    Address addr = *current;
+    Page* page = Page::FromAnyPointerAddress(heap_, addr);
+    RememberedSet<OLD_TO_NEW>::Insert(page, addr);
   }
 }
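
Everything deleted above (the hash-set de-duplication, the ExemptPopularPages
sampling, the SCAN_ON_SCAVENGE filtering) existed to keep duplicates and
popular pages from swamping a second linear buffer. With per-page remembered
sets that pressure disappears, because inserting the same slot twice is a
no-op, which is why MoveEntriesToRememberedSet can be a plain loop. A toy model
of that property (std::set stands in for the real SlotSet from
src/heap/slot-set.h, which is not part of this excerpt):

    #include <cassert>
    #include <set>

    int main() {
      std::set<uintptr_t> slot_set;   // stand-in for a page's OLD_TO_NEW set
      slot_set.insert(0x1000);
      slot_set.insert(0x1000);        // a duplicate store-buffer entry...
      assert(slot_set.size() == 1);   // ...is simply absorbed, no dedup pass
      return 0;
    }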
 
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 9eeb001..e7e9c98 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -9,213 +9,72 @@
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
 #include "src/globals.h"
+#include "src/heap/slot-set.h"
 
 namespace v8 {
 namespace internal {
 
-class Page;
-class PagedSpace;
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
+// Intermediate buffer that accumulates old-to-new stores from the generated
+// code. On buffer overflow the slots are moved to the remembered set.
 class StoreBuffer {
  public:
   explicit StoreBuffer(Heap* heap);
-
   static void StoreBufferOverflow(Isolate* isolate);
-
   void SetUp();
   void TearDown();
 
-  // This is used to add addresses to the store buffer non-concurrently.
-  inline void Mark(Address addr);
-
-  // This is used to add addresses to the store buffer when multiple threads
-  // may operate on the store buffer.
-  inline void MarkSynchronized(Address addr);
-
-  // This is used by the heap traversal to enter the addresses into the store
-  // buffer that should still be in the store buffer after GC.  It enters
-  // addresses directly into the old buffer because the GC starts by wiping the
-  // old buffer and thereafter only visits each cell once so there is no need
-  // to attempt to remove any dupes.  During the first part of a GC we
-  // are using the store buffer to access the old spaces and at the same time
-  // we are rebuilding the store buffer using this function.  There is, however
-  // no issue of overwriting the buffer we are iterating over, because this
-  // stage of the scavenge can only reduce the number of addresses in the store
-  // buffer (some objects are promoted so pointers to them do not need to be in
-  // the store buffer).  The later parts of the GC scan the pages that are
-  // exempt from the store buffer and process the promotion queue.  These steps
-  // can overflow this buffer.  We check for this and on overflow we call the
-  // callback set up with the StoreBufferRebuildScope object.
-  inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
-  // Iterates over all pointers that go from old space to new space.  It will
-  // delete the store buffer as it starts so the callback should reenter
-  // surviving old-to-new pointers into the store buffer to rebuild it.
-  void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
   static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
-  static const int kHashSetLengthLog2 = 12;
-  static const int kHashSetLength = 1 << kHashSetLengthLog2;
 
-  void Compact();
-
-  void GCPrologue();
-  void GCEpilogue();
-
-  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
-  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
-  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
-  void SetTop(Object*** top) {
-    DCHECK(top >= Start());
-    DCHECK(top <= Limit());
-    old_top_ = reinterpret_cast<Address*>(top);
-  }
-
-  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
-  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
-  void EnsureSpace(intptr_t space_needed);
-  void Verify();
-
-  bool PrepareForIteration();
-
-  void Filter(int flag);
-
-  // Eliminates all stale store buffer entries from the store buffer, i.e.,
-  // slots that are not part of live objects anymore. This method must be
-  // called after marking, when the whole transitive closure is known and
-  // must be called before sweeping when mark bits are still intact.
-  void ClearInvalidStoreBufferEntries();
-  void VerifyValidStoreBufferEntries();
+  void MoveEntriesToRememberedSet();
 
  private:
   Heap* heap_;
 
-  // The store buffer is divided up into a new buffer that is constantly being
-  // filled by mutator activity and an old buffer that is filled with the data
-  // from the new buffer after compression.
+  // The start and the limit of the buffer that contains store slots
+  // added from the generated code.
   Address* start_;
   Address* limit_;
 
-  Address* old_start_;
-  Address* old_limit_;
-  Address* old_top_;
-  Address* old_reserved_limit_;
-  base::VirtualMemory* old_virtual_memory_;
-
-  bool old_buffer_is_sorted_;
-  bool old_buffer_is_filtered_;
-  bool during_gc_;
-  // The garbage collector iterates over many pointers to new space that are not
-  // handled by the store buffer.  This flag indicates whether the pointers
-  // found by the callbacks should be added to the store buffer or not.
-  bool store_buffer_rebuilding_enabled_;
-  StoreBufferCallback callback_;
-  bool may_move_store_buffer_entries_;
-
   base::VirtualMemory* virtual_memory_;
-
-  // Two hash sets used for filtering.
-  // If address is in the hash set then it is guaranteed to be in the
-  // old part of the store buffer.
-  uintptr_t* hash_set_1_;
-  uintptr_t* hash_set_2_;
-  bool hash_sets_are_empty_;
-
-  // Used for synchronization of concurrent store buffer access.
-  base::Mutex mutex_;
-
-  void ClearFilteringHashSets();
-
-  bool SpaceAvailable(intptr_t space_needed);
-  void ExemptPopularPages(int prime_sample_step, int threshold);
-
-  void ProcessOldToNewSlot(Address slot_address,
-                           ObjectSlotCallback slot_callback);
-
-  void FindPointersToNewSpaceInRegion(Address start, Address end,
-                                      ObjectSlotCallback slot_callback);
-
-  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-
-#ifdef VERIFY_HEAP
-  void VerifyPointers(LargeObjectSpace* space);
-#endif
-
-  friend class DontMoveStoreBufferEntriesScope;
-  friend class FindPointersToNewSpaceVisitor;
-  friend class StoreBufferRebuildScope;
 };
 
 
-class StoreBufferRebuilder {
+class LocalStoreBuffer BASE_EMBEDDED {
  public:
-  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer) {}
+  explicit LocalStoreBuffer(Heap* heap)
+      : top_(new Node(nullptr)), heap_(heap) {}
 
-  void Callback(MemoryChunk* page, StoreBufferEvent event);
+  ~LocalStoreBuffer() {
+    Node* current = top_;
+    while (current != nullptr) {
+      Node* tmp = current->next;
+      delete current;
+      current = tmp;
+    }
+  }
+
+  inline void Record(Address addr);
+  inline void Process(StoreBuffer* store_buffer);
 
  private:
-  StoreBuffer* store_buffer_;
+  static const int kBufferSize = 16 * KB;
 
-  // We record in this variable how full the store buffer was when we started
-  // iterating over the current page, finding pointers to new space.  If the
-  // store buffer overflows again we can exempt the page from the store buffer
-  // by rewinding to this point instead of having to search the store buffer.
-  Object*** start_of_current_page_;
-  // The current page we are scanning in the store buffer iterator.
-  MemoryChunk* current_page_;
+  struct Node : Malloced {
+    explicit Node(Node* next_node) : next(next_node), count(0) {}
+
+    inline bool is_full() { return count == kBufferSize; }
+
+    Node* next;
+    Address buffer[kBufferSize];
+    int count;
+  };
+
+  Node* top_;
+  Heap* heap_;
 };
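
Each Node carries its slot payload inline, so Record() touches only the current
chunk and allocates a new one exactly when it fills. Rough sizing under the
usual definitions (KB == 1024 from src/globals.h; Address is pointer-sized):

    // kBufferSize counts entries, not bytes: 16 * 1024 = 16384 slots per Node.
    constexpr int kEntriesPerNode = 16 * 1024;
    constexpr size_t kPayloadX64 = kEntriesPerNode * 8u;  // 128 KB on x64
    constexpr size_t kPayloadIA32 = kEntriesPerNode * 4u; // 64 KB on ia32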
 
-
-class StoreBufferRebuildScope {
- public:
-  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
-                                   StoreBufferCallback callback)
-      : store_buffer_(store_buffer),
-        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
-        stored_callback_(store_buffer->callback_) {
-    store_buffer_->store_buffer_rebuilding_enabled_ = true;
-    store_buffer_->callback_ = callback;
-    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
-  }
-
-  ~StoreBufferRebuildScope() {
-    store_buffer_->callback_ = stored_callback_;
-    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
-  }
-
- private:
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-  StoreBufferCallback stored_callback_;
-};
-
-
-class DontMoveStoreBufferEntriesScope {
- public:
-  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer),
-        stored_state_(store_buffer->may_move_store_buffer_entries_) {
-    store_buffer_->may_move_store_buffer_entries_ = false;
-  }
-
-  ~DontMoveStoreBufferEntriesScope() {
-    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
-  }
-
- private:
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-};
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index d957872..cb6bad8 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -203,10 +203,8 @@
     Assembler::FlushICache(isolate_, pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -268,16 +266,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  return !Assembler::IsNop(pc());
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index f120a62..2ac3088 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -803,6 +803,11 @@
   emit_operand(reg, op);
 }
 
+void Assembler::cmp(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x39);
+  emit_operand(reg, op);
+}
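
Opcode 0x39 encodes CMP r/m32, r32 (memory or register on the left), complementing the existing 0x3B form, CMP r32, r/m32, so both operand orders are now expressible. A hypothetical call site (assembler setup elided):

    // Both compare eax with [ebx]; only the encoding direction differs.
    masm.cmp(eax, Operand(ebx, 0));  // 3B 03  cmp eax, [ebx]
    masm.cmp(Operand(ebx, 0), eax);  // 39 03  cmp [ebx], eax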
 
 void Assembler::cmp(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
@@ -2000,6 +2005,15 @@
 }
 
 
+void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
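F3 0F 2A is the CVTSI2SS encoding (signed 32-bit integer to scalar single precision), the single-precision sibling of the F2-prefixed cvtsi2sd below. A sketch of a call site, using the register convenience overload added to the header:

    masm.cvtsi2ss(xmm0, eax);              // xmm0 = (float)eax
    masm.cvtsi2ss(xmm1, Operand(ebx, 0));  // xmm1 = (float)[ebx]
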
 void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF2);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 0b20252..f517c98 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -184,6 +184,8 @@
 #undef DECLARE_REGISTER
 const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
 
+typedef DoubleRegister Simd128Register;
+
 typedef DoubleRegister XMMRegister;
 
 enum Condition {
@@ -676,6 +678,7 @@
   void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
   void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+  void cmp(const Operand& op, Register reg);
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -960,6 +963,8 @@
   }
   void cvtsd2si(Register dst, XMMRegister src);
 
+  void cvtsi2ss(XMMRegister dst, Register src) { cvtsi2ss(dst, Operand(src)); }
+  void cvtsi2ss(XMMRegister dst, const Operand& src);
   void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1408,7 +1413,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a2aec74..c48c74a 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -60,42 +60,45 @@
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
-
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
   //  -- edx : new target (preserved for callee)
   //  -- edi : target function (preserved for callee)
   // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+    // Function is also the parameter to the runtime call.
+    __ push(edi);
 
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  __ push(edi);
-  __ push(edx);
-  // Function is also the parameter to the runtime call.
-  __ push(edi);
+    __ CallRuntime(function_id, 1);
+    __ mov(ebx, eax);
 
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ pop(edx);
-  __ pop(edi);
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
 }
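
The argument count is Smi-tagged before it is pushed so the stack slot holds a valid tagged value while the runtime call may trigger GC. On ia32 (kSmiTagSize == 1, kSmiTag == 0) tagging is a single shift; a minimal model:

    // Minimal model of ia32 Smi tagging; real Smis carry 31 bits of payload.
    static inline int32_t SmiTag(int32_t value) { return value << 1; }
    static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }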
 
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
 }
 
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
-}
-
-
 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
@@ -108,17 +111,16 @@
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above_equal, &ok, Label::kNear);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- edi: constructor function
@@ -137,148 +139,20 @@
     __ push(eax);
 
     if (create_implicit_receiver) {
-      __ push(edi);
-      __ push(edx);
+      // Allocate the new receiver object.
+      __ Push(edi);
+      __ Push(edx);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
 
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
-        __ j(not_equal, &rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        // edx: new target
-        __ mov(eax,
-               FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
-        // Will both indicate a NULL and a Smi
-        __ JumpIfSmi(eax, &rt_call);
-        // edi: constructor
-        // eax: initial map (if proven valid below)
-        __ CmpObjectType(eax, MAP_TYPE, ebx);
-        __ j(not_equal, &rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
-        __ j(not_equal, &rt_call);
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // edi: constructor
-        // eax: initial map
-        __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-        __ j(equal, &rt_call);
-
-        // Now allocate the JSObject on the heap.
-        // edi: constructor
-        // eax: initial map
-        __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-        __ shl(edi, kPointerSizeLog2);
-
-        __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
-        Factory* factory = masm->isolate()->factory();
-
-        // Allocated the JSObject, now initialize the fields.
-        // eax: initial map
-        // ebx: JSObject (not HeapObject tagged - the actual address).
-        // edi: start of next object
-        __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-        __ mov(ecx, factory->empty_fixed_array());
-        __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-        __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-        __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ or_(ebx, Immediate(kHeapObjectTag));
-
-        // Fill all the in-object properties with the appropriate filler.
-        // ebx: JSObject (tagged)
-        // ecx: First in-object property of JSObject (not tagged)
-        __ mov(edx, factory->undefined_value());
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          // The code below relies on these assumptions.
-          STATIC_ASSERT(Map::kNoSlackTracking == 0);
-          STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-          // Check if slack tracking is enabled.
-          __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
-          __ shr(esi, Map::ConstructionCounter::kShift);
-          __ j(zero, &no_inobject_slack_tracking);  // Map::kNoSlackTracking
-          __ push(esi);  // Save allocation count value.
-          // Decrease generous allocation count.
-          __ sub(FieldOperand(eax, Map::kBitField3Offset),
-                 Immediate(1 << Map::ConstructionCounter::kShift));
-
-          // Allocate object with a slack.
-          __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-          __ neg(esi);
-          __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
-          // esi: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ cmp(ecx, esi);
-            __ Assert(less_equal,
-                      kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-          __ InitializeFieldsWithFiller(ecx, esi, edx);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ mov(edx, factory->one_pointer_filler_map());
-          __ InitializeFieldsWithFiller(ecx, edi, edx);
-
-          __ pop(esi);  // Restore allocation count value before decreasing.
-          __ cmp(esi, Map::kSlackTrackingCounterEnd);
-          __ j(not_equal, &allocated);
-
-          // Push the object to the stack, and then the initial map as
-          // an argument to the runtime call.
-          __ push(ebx);
-          __ push(eax);  // initial map
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ pop(ebx);
-
-          // Continue with JSObject being successfully allocated
-          // ebx: JSObject (tagged)
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(ecx, edi, edx);
-
-        // Continue with JSObject being successfully allocated
-        // ebx: JSObject (tagged)
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // edx: new target
-      __ bind(&rt_call);
-      int offset = kPointerSize;
-
-      // Must restore esi (context) and edi (constructor) before calling
-      // runtime.
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ mov(edi, Operand(esp, offset));
-      __ push(edi);  // constructor function
-      __ push(edx);  // new target
-      __ CallRuntime(Runtime::kNewObject);
-      __ mov(ebx, eax);  // store result in ebx
-
-      // New object allocated.
-      // ebx: newly allocated object
-      __ bind(&allocated);
-
-      // Restore the parameters.
-      __ pop(edx);  // new.target
-      __ pop(edi);  // Constructor function.
+      // ----------- S t a t e -------------
+      //  -- edi: constructor function
+      //  -- ebx: newly allocated object
+      //  -- edx: new target
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
       __ mov(eax, Operand(esp, 0));
@@ -331,8 +205,7 @@
 
     if (create_implicit_receiver) {
       // If the result is an object (in the ECMA sense), we should get rid
-      // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-      // on page 74.
+      // of the receiver and use the result.
       Label use_receiver, exit;
 
       // If the result is a smi, it is *not* an object in the ECMA sense.
@@ -359,6 +232,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, because a Smi result would mean that
+  // the constructor of a derived class returned neither undefined nor an
+  // Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(eax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
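
A Smi can never be a JSReceiver, so a single tag-bit test suffices to catch the invalid return value here. A sketch under the ia32 tagging scheme (kSmiTagMask == 1, kSmiTag == 0):

    // Heap-object pointers have the low bit set; Smis have it clear.
    static inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }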
+
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
@@ -372,17 +258,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -513,10 +405,8 @@
 //   o ebp: the caller's frame pointer
 //   o esp: stack pointer (pointing to return address)
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -528,14 +418,17 @@
   __ push(edi);  // Callee's JS function.
   __ push(edx);  // Callee's new target.
 
-  // Push zero for bytecode array offset.
-  __ push(Immediate(0));
-
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into edi (InterpreterBytecodeRegister).
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+         Immediate(DebugInfo::uninitialized()));
+  __ j(not_equal, &load_debug_bytecode_array);
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -545,6 +438,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push bytecode array.
+  __ push(kInterpreterBytecodeArrayRegister);
+  // Push zero for bytecode array offset.
+  __ push(Immediate(0));
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -578,24 +476,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(masm->isolate());
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &ok);
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -604,10 +487,8 @@
          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  // Since the dispatch table root might be set after builtins are generated,
-  // load directly from the roots table.
-  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
-  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+                  masm->isolate())));
 
   // Push dispatch table as a stack located parameter to the bytecode handler.
   DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
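
With the table reached through an ExternalReference, the dispatch itself is unchanged: index the table with the current bytecode and jump to the handler. A hedged C++ model (real handlers are Code objects; the signature here is simplified):

    using BytecodeHandler = void (*)();

    void Dispatch(const uint8_t* bytecode_array, int offset,
                  BytecodeHandler* dispatch_table) {
      // movzx_b of the current bytecode, then an indexed load and call.
      dispatch_table[bytecode_array[offset]]();
    }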
@@ -625,8 +506,17 @@
   // and header removal.
   __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(ebx);
-  __ nop();  // Ensure that return address still counts as interpreter entry
-             // trampoline.
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ jmp(&bytecode_array_loaded);
 }
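
In C++ terms the bytecode-array selection the trampoline now performs is roughly the following; the accessor names are illustrative rather than the exact V8 API:

    BytecodeArray* SelectBytecodeArray(SharedFunctionInfo* shared) {
      Object* debug_info = shared->debug_info();
      if (debug_info != DebugInfo::uninitialized()) {
        // Debugger attached: run the patchable debug copy of the bytecode.
        return DebugInfo::cast(debug_info)->debug_bytecode_array();
      }
      return BytecodeArray::cast(shared->function_data());
    }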
 
 
@@ -671,7 +561,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- ebx : the address of the first argument to be pushed. Subsequent
@@ -694,7 +585,9 @@
 
   // Call the target.
   __ Push(edx);  // Re-push return address.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -739,33 +632,16 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ Pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register.
   __ mov(kInterpreterRegisterFileRegister, ebp);
   __ add(kInterpreterRegisterFileRegister,
          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
 
   // Get the bytecode array pointer from the frame.
-  __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
-                      InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
   __ mov(kInterpreterBytecodeArrayRegister,
-         FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+         Operand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -782,12 +658,13 @@
               InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Push dispatch table as a stack located parameter to the bytecode handler -
-  // overwrite the state slot (we don't use these for interpreter deopts).
-  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
-  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Push dispatch table as a stack located parameter to the bytecode handler.
+  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+                  masm->isolate())));
   DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ mov(Operand(esp, kPointerSize), ebx);
+  __ Pop(esi);
+  __ Push(ebx);
+  __ Push(esi);
 
   // Dispatch to the target bytecode.
   __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -795,8 +672,6 @@
   __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ mov(kContextRegister,
          Operand(kInterpreterRegisterFileRegister,
                  InterpreterFrameConstants::kContextFromRegisterPointer));
@@ -808,6 +683,32 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop the state (it is not used for interpreter deopts), pop the
+  // accumulator value into the accumulator register, and push the PC at the
+  // top of the stack (to simulate the initial call to the bytecode handler in
+  // the interpreter entry trampoline).
+  __ Pop(ebx);
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+  __ Push(ebx);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -822,22 +723,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1375,6 +1284,122 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in edx and the double value in xmm0.
+  __ LoadRoot(edx, root_index);
+  __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ Move(ecx, eax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ test(ecx, ecx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into ebx.
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into xmm1, converting the
+    // parameter to a number first with the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(ebx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(eax);
+      __ SmiTag(ecx);
+      __ Push(eax);
+      __ Push(ecx);
+      __ Push(edx);
+      __ mov(eax, ebx);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(ecx);
+      __ Pop(eax);
+      {
+        // Restore the double accumulator value (xmm0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+        __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiUntag(edx);
+        __ Cvtsi2sd(xmm0, edx);
+        __ SmiTag(edx);
+        __ bind(&done_restore);
+      }
+      __ SmiUntag(ecx);
+      __ SmiUntag(eax);
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ movsd(xmm1, FieldOperand(ebx, HeapNumber::kValueOffset));
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiUntag(ebx);
+    __ Cvtsi2sd(xmm1, ebx);
+    __ SmiTag(ebx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (xmm0) and the next parameter value on the right hand side (xmm1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+    __ ucomisd(xmm0, xmm1);
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ movaps(xmm0, xmm1);
+    __ mov(edx, ebx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(edx, Heap::kNanValueRootIndex);
+    __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    __ movmskpd(edi, reg);
+    __ test(edi, Immediate(1));
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    __ dec(ecx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(ecx);
+  __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(ecx);
+  __ mov(eax, edx);
+  __ Ret();
+}
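
A scalar model of the accumulation above for Math.max (Math.min mirrors it with the comparison flipped and +Infinity as the seed). It ignores the observable ToNumber conversions the stub still performs after a NaN has been seen:

    #include <cmath>
    #include <limits>

    double MathMax(const double* args, int argc) {
      double acc = -std::numeric_limits<double>::infinity();
      for (int i = 0; i < argc; i++) {
        double x = args[i];
        if (std::isnan(x)) return std::numeric_limits<double>::quiet_NaN();
        // The signbit test mirrors the movmskpd check: on a 0 == -0 tie,
        // max prefers the operand whose sign bit is clear.
        if (x > acc || (x == acc && std::signbit(acc))) acc = x;
      }
      return acc;
    }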
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax                 : number of arguments
@@ -1472,9 +1497,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);  // the first argument
-    __ Push(edi);  // constructor function
-    __ Push(edx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1606,9 +1630,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);  // the first argument
-    __ Push(edi);  // constructor function
-    __ Push(edx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1724,9 +1747,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ mov(ebx,
-           FieldOperand(eax, JSObject::kHeaderSize +
-                                 Heap::kArgumentsLengthIndex * kPointerSize));
+    __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
     __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
     __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
     __ j(not_equal, &create_runtime);
@@ -1808,10 +1829,138 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+  __ cmp(scratch1, Immediate(0));
+  __ j(not_equal, &done, Label::kNear);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(ebp, scratch2);
+  __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(scratch1,
+         FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(
+      scratch1,
+      FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(scratch1);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the destination address where we will put the return address
+  // after we drop current frame.
+  Register new_sp_reg = scratch2;
+  __ sub(scratch1, args_reg);
+  __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
+                             StandardFrameConstants::kCallerPCOffset));
+
+  if (FLAG_debug_code) {
+    __ cmp(esp, new_sp_reg);
+    __ Check(below, kStackAccessBelowStackPointer);
+  }
+
+  // Copy receiver and return address as well.
+  Register count_reg = scratch1;
+  __ lea(count_reg, Operand(args_reg, 2));
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return-address slot so it is not trashed, and let the following loop copy
+  // it to the right place.
+  Register tmp_reg = scratch3;
+  __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+  __ mov(Operand(esp, 0), tmp_reg);
+
+  // Restore caller's frame pointer now as it could be overwritten by
+  // the copying loop.
+  __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  Operand src(esp, count_reg, times_pointer_size, 0);
+  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+  // Now copy the callee arguments to the caller frame, going backwards so
+  // that the overlapping source and destination areas cannot corrupt the
+  // arguments.
+  Label loop, entry;
+  __ jmp(&entry, Label::kNear);
+  __ bind(&loop);
+  __ dec(count_reg);
+  __ mov(tmp_reg, src);
+  __ mov(dst, tmp_reg);
+  __ bind(&entry);
+  __ cmp(count_reg, Immediate(0));
+  __ j(not_equal, &loop, Label::kNear);
+
+  // Leave current frame.
+  __ mov(esp, new_sp_reg);
+
+  __ bind(&done);
+}
+}  // namespace
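
The destination of the frame drop is plain pointer arithmetic over the caller's formal (or adapted) argument count; a sketch with the ia32 constants inlined:

    // new_sp = ebp + (caller_args - callee_args) * kPointerSize
    //              + StandardFrameConstants::kCallerPCOffset
    static inline uintptr_t NewStackPointer(uintptr_t fp, int caller_args,
                                            int callee_args) {
      const int kPointerSize = 4;     // ia32
      const int kCallerPCOffset = 4;  // return address sits just above fp
      return fp + (caller_args - callee_args) * kPointerSize + kCallerPCOffset;
    }

The loop then moves args_reg + 2 slots (the arguments, the receiver, and the saved return address) from esp up to new_sp, iterating from the highest slot downward so the overlapping regions cannot corrupt each other.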
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the function to call (checked to be a JSFunction)
@@ -1900,6 +2049,12 @@
   //  -- esi : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+    // Reload shared function info.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  }
+
   __ mov(ebx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SmiUntag(ebx);
@@ -2005,13 +2160,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(edi);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
   // Patch the receiver to [[BoundThis]].
   __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
   __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2029,7 +2189,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the target to call (can be any Object).
@@ -2039,14 +2200,24 @@
   __ JumpIfSmi(edi, &non_callable);
   __ bind(&non_smi);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
        RelocInfo::CODE_TARGET);
   __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
-  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
        RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ j(zero, &non_callable);
+
   __ CmpInstanceType(ecx, JS_PROXY_TYPE);
   __ j(not_equal, &non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ PopReturnAddressTo(ecx);
   __ Push(edi);
@@ -2061,15 +2232,12 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
-  __ j(zero, &non_callable, Label::kNear);
   // Overwrite the original receiver with the (original) target.
   __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2387,14 +2555,12 @@
   // Load the next prototype.
   __ bind(&next_prototype);
   __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, receiver_check_failed);
-  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ test(FieldOperand(scratch0, Map::kBitField3Offset),
-          Immediate(Map::IsHiddenPrototype::kMask));
+  __ test(FieldOperand(receiver, Map::kBitField3Offset),
+          Immediate(Map::HasHiddenPrototype::kMask));
   __ j(zero, receiver_check_failed);
+
+  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ jmp(&prototype_loop_start, Label::kNear);
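
The reordered walk consults HasHiddenPrototype on the current map before following the prototype link, so a non-hidden prototype ends the loop without its map ever being loaded. The control flow, with invented helper names:

    bool CheckReceiver(JSObject* receiver) {
      while (!TemplateMatches(receiver)) {  // invented helper
        Map* map = receiver->map();
        if (!map->has_hidden_prototype()) return false;  // check failed
        receiver = JSObject::cast(map->prototype());
      }
      return true;
    }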
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 6e597e2..510b58e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -613,7 +613,6 @@
   __ Cvtsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -624,7 +623,6 @@
     __ bind(&done);
     __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
     __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
-    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
   } else {
     __ bind(&call_runtime);
@@ -644,7 +642,6 @@
     __ add(esp, Immediate(kDoubleSize));
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(0);
   }
 }
@@ -730,456 +727,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The key is in edx and the parameter count is in eax.
-  DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // The displacement is used for skipping the frame pointer on the
-  // stack. It is the offset of the last parameter (if any) relative
-  // to the frame pointer.
-  static const int kDisplacement = 1 * kPointerSize;
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor, Label::kNear);
-
-  // Check index against formal parameters count limit passed in
-  // through register eax. Use unsigned comparison to get negative
-  // check for free.
-  __ cmp(edx, eax);
-  __ j(above_equal, &slow, Label::kNear);
-
-  // Read the argument from the stack and return it.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
-  __ lea(ebx, Operand(ebp, eax, times_2, 0));
-  __ neg(edx);
-  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
-  __ ret(0);
-
-  // Arguments adaptor case: Check index against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, ecx);
-  __ j(above_equal, &slow, Label::kNear);
-
-  // Read the argument from the stack and return it.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
-  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
-  __ neg(edx);
-  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
-  __ ret(0);
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ pop(ebx);  // Return address.
-  __ push(edx);
-  __ push(ebx);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &runtime, Label::kNear);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame, Label::kNear);
-
-  // No adaptor, parameter count = argument count.
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ jmp(&try_allocate, Label::kNear);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx, Operand(edx, ecx, times_2,
-                      StandardFrameConstants::kCallerSPOffset));
-
-  // ebx = parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, ecx);
-  __ j(less_equal, &try_allocate, Label::kNear);
-  __ mov(ebx, ecx);
-
-  // Save mapped parameter count and function.
-  __ bind(&try_allocate);
-  __ push(edi);
-  __ push(ebx);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  Label no_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &no_parameter_map, Label::kNear);
-  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
-  __ bind(&no_parameter_map);
-
-  // 2. Backing store.
-  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
-
-  // eax = address of new object(s) (tagged)
-  // ecx = argument count (smi-tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Get the arguments map from the current native context into edi.
-  Label has_mapped_parameters, instantiate;
-  __ mov(edi, NativeContextOperand());
-  __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, ebx);
-  __ j(not_zero, &has_mapped_parameters, Label::kNear);
-  __ mov(
-      edi,
-      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
-  __ jmp(&instantiate, Label::kNear);
-
-  __ bind(&has_mapped_parameters);
-  __ mov(edi, Operand(edi, Context::SlotOffset(
-                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
-  __ bind(&instantiate);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // edi = address of arguments map (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Copy the JS object part.
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ mov(edi, Operand(esp, 1 * kPointerSize));
-  __ AssertNotSmi(edi);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                               Heap::kArgumentsCalleeIndex * kPointerSize),
-         edi);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(ecx);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                      Heap::kArgumentsLengthIndex * kPointerSize),
-         ecx);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, edi will point there, otherwise to the
-  // backing store.
-  __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (tagged)
-  // edx = address of receiver argument
-  // edi = address of parameter map or backing store (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Free two registers.
-  __ push(edx);
-  __ push(eax);
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &skip_parameter_map);
-
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
-  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
-  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ push(ecx);
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ add(ebx, Operand(esp, 5 * kPointerSize));
-  __ sub(ebx, eax);
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  __ mov(edx, edi);
-  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
-  // eax = loop variable (tagged)
-  // ebx = mapping index (tagged)
-  // ecx = the hole value
-  // edx = address of parameter map (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = argument count (tagged)
-  // esp[4] = address of new object (tagged)
-  // esp[8] = address of receiver argument
-  // esp[12] = mapped parameter count (tagged)
-  // esp[16] = function
-  // esp[20] = parameter count (tagged)
-  __ jmp(&parameters_test, Label::kNear);
-
-  __ bind(&parameters_loop);
-  __ sub(eax, Immediate(Smi::FromInt(1)));
-  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
-  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ test(eax, eax);
-  __ j(not_zero, &parameters_loop, Label::kNear);
-  __ pop(ecx);
-
-  __ bind(&skip_parameter_map);
-
-  // ecx = argument count (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = address of new object (tagged)
-  // esp[4] = address of receiver argument
-  // esp[8] = mapped parameter count (tagged)
-  // esp[12] = function
-  // esp[16] = parameter count (tagged)
-  // Copy arguments header and remaining slots (if there are any).
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
-  Label arguments_loop, arguments_test;
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
-  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(edx, ebx);
-  __ jmp(&arguments_test, Label::kNear);
-
-  __ bind(&arguments_loop);
-  __ sub(edx, Immediate(kPointerSize));
-  __ mov(eax, Operand(edx, 0));
-  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(ebx, ecx);
-  __ j(less, &arguments_loop, Label::kNear);
-
-  // Restore.
-  __ pop(eax);  // Address of arguments object.
-  __ Drop(4);
-
-  // Return.
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ pop(eax);   // Remove saved mapped parameter count.
-  __ pop(edi);   // Pop saved function.
-  __ pop(eax);   // Remove saved parameter count.
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &try_allocate, Label::kNear);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size of
-  // the arguments object and the elements array.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ mov(eax, ecx);
-  __ test(eax, eax);
-  __ j(zero, &add_arguments_object, Label::kNear);
-  __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
-  __ bind(&add_arguments_object);
-  __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
-
-  // Get the arguments map from the current native context.
-  __ mov(edi, NativeContextOperand());
-  __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(ecx);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                      Heap::kArgumentsLengthIndex * kPointerSize),
-         ecx);
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ test(ecx, ecx);
-  __ j(zero, &done, Label::kNear);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
-  // Untag the length for the loop below.
-  __ SmiUntag(ecx);
-
-  // Copy the fixed array slots.
-  Label loop;
-  __ bind(&loop);
-  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(edi, Immediate(kPointerSize));
-  __ sub(edx, Immediate(kPointerSize));
-  __ dec(ecx);
-  __ j(not_zero, &loop);
-
-  // Return.
-  __ bind(&done);
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // ebx : rest parameter index (tagged)
-  // esp[0] : return address
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ pop(eax);   // Save return address.
-  __ push(ecx);  // Push number of parameters.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ebx);  // Push rest parameter index.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1251,39 +798,37 @@
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ JumpIfSmi(eax, &runtime);
   __ mov(edx, eax);  // Make a copy of the original subject string.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
 
   // eax: subject string
   // edx: subject string
-  // ebx: subject string instance type
   // ecx: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
   // (1) Sequential two byte?  If yes, go to (9).
-  // (2) Sequential one byte?  If yes, go to (6).
-  // (3) Anything but sequential or cons?  If yes, go to (7).
-  // (4) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (2) Sequential one byte?  If yes, go to (5).
+  // (3) Sequential or cons?  If not, go to (6).
+  // (4) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (5) One byte sequential.  Load regexp code for one byte.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (7) Not a long external string?  If yes, go to (10).
-  // (8) External string.  Make it, offset-wise, look like a sequential string.
-  // (8a) Is the external string one byte?  If yes, go to (6).
-  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
+  // (6) Long external string?  If not, go to (10).
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (8) Is the external string one byte?  If yes, go to (5).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (5a).
+  // (11) Sliced string.  Replace subject with parent. Go to (1).
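+  //
+  // (Editor's note) The renumbering folds the old (5a)/(5b) re-dispatch into
+  // step (1): cons and sliced strings now jump back to check_underlying, so
+  // the map and instance type are reloaded each time the subject string is
+  // replaced.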
 
-  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
-        external_string /* 8 */, check_underlying /* 5a */,
-        not_seq_nor_cons /* 7 */, check_code /* E */,
-        not_long_external /* 10 */;
+  Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
+      external_string /* 7 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 6 */, check_code /* E */, not_long_external /* 10 */;
 
+  __ bind(&check_underlying);
   // (1) Sequential two byte?  If yes, go to (9).
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
   __ and_(ebx, kIsNotStringMask |
                kStringRepresentationMask |
                kStringEncodingMask |
@@ -1291,14 +836,14 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);  // Go to (9).
 
-  // (2) Sequential one byte?  If yes, go to (6).
+  // (2) Sequential one byte?  If yes, go to (5).
   // Any other sequential string must be one byte.
   __ and_(ebx, Immediate(kIsNotStringMask |
                          kStringRepresentationMask |
                          kShortExternalStringMask));
-  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).
+  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (5).
 
-  // (3) Anything but sequential or cons?  If yes, go to (7).
+  // (3) Sequential or cons?  If not, go to (6).
   // We check whether the subject string is a cons, since sequential strings
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
@@ -1306,32 +851,19 @@
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(ebx, Immediate(kExternalStringTag));
-  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).
+  __ j(greater_equal, &not_seq_nor_cons);  // Go to (6).
 
   // (4) Cons string.  Check that it's flat.
   // Replace subject with first string and reload instance type.
   __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
   __ j(not_equal, &runtime);
   __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
-  __ bind(&check_underlying);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
-  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);  // Go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  __ test_b(ebx, kStringRepresentationMask);
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ j(not_zero, &external_string);  // Go to (8).
+  __ jmp(&check_underlying);
 
   // eax: sequential subject string (or look-alike, external string)
   // edx: original subject string
   // ecx: RegExp data (FixedArray)
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (5) One byte sequential.  Load regexp code for one byte.
   __ bind(&seq_one_byte_string);
   // Load previous index and check range before edx is overwritten.  We have
   // to use edx instead of eax here because it might have been only made to
@@ -1558,12 +1090,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (7) Not a long external string?  If yes, go to (10).
+  // (6) Long external string?  If not, go to (10).
   __ bind(&not_seq_nor_cons);
   // Compare flags are still set from (3).
   __ j(greater, &not_long_external, Label::kNear);  // Go to (10).
 
-  // (8) External string.  Short external strings have been ruled out.
+  // (7) External string.  Short external strings have been ruled out.
   __ bind(&external_string);
   // Reload instance type.
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1579,14 +1111,14 @@
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // (8a) Is the external string one byte?  If yes, go to (6).
+  // (8) Is the external string one byte?  If yes, go to (5).
   __ test_b(ebx, kStringEncodingMask);
-  __ j(not_zero, &seq_one_byte_string);  // Goto (6).
+  __ j(not_zero, &seq_one_byte_string);  // Go to (5).
 
   // eax: sequential subject string (or look-alike, external string)
   // edx: original subject string
   // ecx: RegExp data (FixedArray)
-  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   __ bind(&seq_two_byte_string);
   // Load previous index and check range before edx is overwritten.  We have
   // to use edx instead of eax here because it might have been only made to
@@ -1606,11 +1138,11 @@
   __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent.  Go to (5a).
+  // (11) Sliced string.  Replace subject with parent.  Go to (1).
   // Load offset into edi and replace subject string with parent.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
   __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
-  __ jmp(&check_underlying);  // Go to (5a).
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -1689,16 +1221,11 @@
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
       __ cmp(edx, isolate()->factory()->undefined_value());
-      if (is_strong(strength())) {
-        // In strong mode, this comparison must throw, so call the runtime.
-        __ j(equal, &runtime_call, Label::kFar);
-      } else {
-        Label check_for_nan;
-        __ j(not_equal, &check_for_nan, Label::kNear);
-        __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-        __ ret(0);
-        __ bind(&check_for_nan);
-      }
+      Label check_for_nan;
+      __ j(not_equal, &check_for_nan, Label::kNear);
+      __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+      __ ret(0);
+      __ bind(&check_for_nan);
     }
 
     // Test for NaN. Compare heap numbers in a general way,
@@ -1718,12 +1245,6 @@
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
-      if (is_strong(strength())) {
-        // We have already tested for smis and heap numbers, so if both
-        // arguments are not strings we must proceed to the slow case.
-        __ test(ecx, Immediate(kIsNotStringMask));
-        __ j(not_zero, &runtime_call, Label::kFar);
-      }
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);
@@ -1868,7 +1389,7 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal;
+    Label return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1877,48 +1398,56 @@
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
     __ j(not_zero, &runtime_call, Label::kNear);
-    __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, &undetectable, Label::kNear);
+    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, &return_unequal, Label::kNear);
+
+    __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
     __ j(below, &runtime_call, Label::kNear);
-    __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
+    __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
     __ j(below, &runtime_call, Label::kNear);
-    // We do not bail out after this point.  Both are JSObjects, and
-    // they are equal if and only if both are undetectable.
-    // The and of the undetectable flags is 1 if and only if they are equal.
+
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in eax.
+    __ ret(0);  // eax, edx were pushed
+
+    __ bind(&undetectable);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
     __ j(zero, &return_unequal, Label::kNear);
-    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal, Label::kNear);
-    // The objects are both undetectable, so they both compare as the value
-    // undefined, and are equal.
     __ Move(eax, Immediate(EQUAL));
-    __ bind(&return_unequal);
-    // Return non-equal by returning the non-zero object pointer in eax,
-    // or return equal if we fell through to here.
-    __ ret(0);  // rax, rdx were pushed
+    __ ret(0);  // eax, edx were pushed
   }
   __ bind(&runtime_call);
 
-  // Push arguments below the return address.
-  __ pop(ecx);
-  __ push(edx);
-  __ push(eax);
-
-  // Figure out which native to call and setup the arguments.
   if (cc == equal) {
-    __ push(ecx);
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edx);
+      __ Push(eax);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ sub(eax, Immediate(isolate()->factory()->true_value()));
+    __ Ret();
   } else {
+    // Push arguments below the return address.
+    __ pop(ecx);
+    __ push(edx);
+    __ push(eax);
     __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-
-    // Restore return address on the stack.
     __ push(ecx);
-
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ bind(&miss);
@@ -2147,7 +1676,8 @@
 
   __ bind(&call_function);
   __ Set(eax, argc);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -2186,7 +1716,7 @@
 
   __ bind(&call);
   __ Set(eax, argc);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -2303,16 +1833,22 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
+  // Reserve space on the stack for the three arguments passed to the call. If
+  // the result size exceeds what can be returned in registers, also reserve
+  // space for the hidden argument for the result location, and space for the
+  // result itself.
+  int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
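+  // For example (editor's sketch of the arithmetic): result_size() == 3
+  // reserves 4 + 3 = 7 slots: three C arguments, one hidden pointer to the
+  // result location, and three slots for the result itself.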
+
   // Enter the exit frame that transitions from JavaScript to C++.
   if (argv_in_register()) {
     DCHECK(!save_doubles());
-    __ EnterApiExitFrame(3);
+    __ EnterApiExitFrame(arg_stack_space);
 
     // Move argc and argv into the correct registers.
     __ mov(esi, ecx);
     __ mov(edi, eax);
   } else {
-    __ EnterExitFrame(save_doubles());
+    __ EnterExitFrame(arg_stack_space, save_doubles());
   }
 
   // ebx: pointer to C function  (C callee-saved)
@@ -2327,14 +1863,36 @@
   if (FLAG_debug_code) {
     __ CheckStackAlignment();
   }
-
   // Call C function.
-  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
-  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
-  __ mov(Operand(esp, 2 * kPointerSize),
-         Immediate(ExternalReference::isolate_address(isolate())));
+  if (result_size() <= 2) {
+    __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+    __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(isolate())));
+  } else {
+    DCHECK_EQ(3, result_size());
+    // Pass a pointer to the result location as the first argument.
+    __ lea(eax, Operand(esp, 4 * kPointerSize));
+    __ mov(Operand(esp, 0 * kPointerSize), eax);
+    __ mov(Operand(esp, 1 * kPointerSize), edi);  // argc.
+    __ mov(Operand(esp, 2 * kPointerSize), esi);  // argv.
+    __ mov(Operand(esp, 3 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(isolate())));
+  }
   __ call(ebx);
-  // Result is in eax or edx:eax - do not destroy these registers!
+
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+#ifndef _WIN32
+    // Restore the "hidden" argument on the stack which was popped by caller.
+    __ sub(esp, Immediate(kPointerSize));
+#endif
+    // Read the result values stored on the stack above the arguments.
+    __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
+    __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
+    __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
+  }
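+  // Stack layout here (editor's note, from the stores above): esp[0..2]
+  // hold the hidden result pointer, argc and argv, esp[3] the isolate, and
+  // esp[4..6] the three result words just copied into the return registers;
+  // the sub above keeps these offsets valid on non-Win32 after the callee
+  // pops the hidden argument.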
+  // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -3130,6 +2688,42 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in eax.
+  Label is_number;
+  __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
+  // eax: receiver
+  // edi: receiver map
+  __ j(above, &not_name, Label::kNear);
+  __ Ret();
+  __ bind(&not_name);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3333,21 +2927,17 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
   __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
-      __ AssertSmi(eax);
-      __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
-      __ AssertSmi(edx);
-      __ push(eax);
-      __ mov(eax, edx);
-      __ pop(edx);
-    }
-    __ sub(eax, edx);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
+    __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+    __ AssertSmi(eax);
+    __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+    __ AssertSmi(edx);
+    __ push(eax);
+    __ mov(eax, edx);
+    __ pop(edx);
   }
+  __ sub(eax, edx);
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3437,7 +3027,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3680,8 +3270,6 @@
   if (Token::IsEqualityOp(op())) {
     __ sub(eax, edx);
     __ ret(0);
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     __ PopReturnAddressTo(ecx);
     __ Push(edx);
@@ -3985,11 +3573,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     not_zero,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -5133,6 +4718,644 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : target
+  //  -- edx    : new target
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+  __ AssertReceiver(edx);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(ecx, &new_object);
+  __ CmpObjectType(ecx, MAP_TYPE, ebx);
+  __ j(not_equal, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
+  __ j(not_equal, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+  __ lea(ebx, Operand(ebx, times_pointer_size, 0));
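+  // (Editor's note) The movzx/lea pair converts the instance size from
+  // words to bytes: lea scales ebx by times_pointer_size without touching
+  // the flags.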
+  __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ mov(Operand(eax, JSObject::kMapOffset), ecx);
+  __ mov(Operand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(Operand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : result (untagged)
+  //  -- ebx    : result fields (untagged)
+  //  -- edi    : result end (untagged)
+  //  -- ecx    : initial map
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ test(FieldOperand(ecx, Map::kBitField3Offset),
+          Immediate(Map::ConstructionCounter::kMask));
+  __ j(not_zero, &slack_tracking, Label::kNear);
+  {
+    // Initialize all in-object fields with undefined.
+    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edi, edx);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ inc(eax);
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ sub(FieldOperand(ecx, Map::kBitField3Offset),
+           Immediate(1 << Map::ConstructionCounter::kShift));
+
+    // Initialize the in-object fields with undefined.
+    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+    __ neg(edx);
+    __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+    __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
+    __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ inc(eax);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ test(FieldOperand(ecx, Map::kBitField3Offset),
+            Immediate(Map::ConstructionCounter::kMask));
+    __ j(zero, &finalize, Label::kNear);
+    __ Ret();
+
+    // Finalize the instance size.
+    __ bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(eax);
+      __ Push(ecx);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(eax);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(ebx);
+    __ Push(ecx);
+    __ Push(ebx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(ecx);
+  }
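+  // (Editor's note) The runtime returns a tagged pointer, so the dec below
+  // strips kHeapObjectTag (== 1) to recover the untagged address that
+  // done_allocate expects; edi is then recomputed as the untagged end of
+  // the object.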
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ dec(eax);
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+  __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+  __ jmp(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make edx point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mov(edx, ebp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sub(eax,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ j(greater, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- esi    : context
+    //  -- esp[0] : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in eax.
+    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+    __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
+    __ mov(ecx, isolate()->factory()->empty_fixed_array());
+    __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
+    __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
+    __ mov(FieldOperand(eax, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(0)));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ lea(ebx,
+           Operand(ebx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+
+    // ----------- S t a t e -------------
+    //  -- esi    : context
+    //  -- eax    : number of rest parameters (tagged)
+    //  -- ebx    : pointer to first rest parameters
+    //  -- esp[0] : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ lea(ecx, Operand(eax, times_half_pointer_size,
+                        JSArray::kSize + FixedArray::kHeaderSize));
+    __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the elements array in edx.
+    __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+           isolate()->factory()->fixed_array_map());
+    __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+    {
+      Label loop, done_loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, eax);
+      __ j(equal, &done_loop, Label::kNear);
+      __ mov(edi, Operand(ebx, 0 * kPointerSize));
+      __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+                          FixedArray::kHeaderSize),
+             edi);
+      __ sub(ebx, Immediate(1 * kPointerSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Setup the rest parameter array in edi.
+    __ lea(edi,
+           Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+    __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
+    __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
+           isolate()->factory()->empty_fixed_array());
+    __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
+    __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ mov(eax, edi);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(ecx);
+      __ Push(eax);
+      __ Push(ebx);
+      __ Push(ecx);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ mov(edx, eax);
+      __ Pop(ebx);
+      __ Pop(eax);
+    }
+    __ jmp(&done_allocate);
+  }
+}
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+                      StandardFrameConstants::kCallerSPOffset));
+
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // edi : function
+  // esp[0] : return address
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor_frame, Label::kNear);
+
+  // No adaptor, parameter count = argument count.
+  __ mov(ebx, ecx);
+  __ push(ecx);
+  __ jmp(&try_allocate, Label::kNear);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ mov(ebx, ecx);
+  __ push(ecx);
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(edx, Operand(edx, ecx, times_2,
+                      StandardFrameConstants::kCallerSPOffset));
+
+  // ebx = parameter count (tagged)
+  // ecx = argument count (smi-tagged)
+  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+  __ cmp(ebx, ecx);
+  __ j(less_equal, &try_allocate, Label::kNear);
+  __ mov(ebx, ecx);
+
+  // Save mapped parameter count and function.
+  __ bind(&try_allocate);
+  __ push(edi);
+  __ push(ebx);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map: has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  Label no_parameter_map;
+  __ test(ebx, ebx);
+  __ j(zero, &no_parameter_map, Label::kNear);
+  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+  __ bind(&no_parameter_map);
+
+  // 2. Backing store.
+  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
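+  // Size sketch (editor's note; smis are value << 1 on ia32): for m mapped
+  // parameters and a arguments the total is m * kPointerSize +
+  // kParameterMapHeaderSize (when m > 0) + a * kPointerSize +
+  // FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize, since scaling
+  // a smi by times_2 yields count * kPointerSize.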
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+
+  // eax = address of new object(s) (tagged)
+  // ecx = argument count (smi-tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Get the arguments map from the current native context into edi.
+  Label has_mapped_parameters, instantiate;
+  __ mov(edi, NativeContextOperand());
+  __ mov(ebx, Operand(esp, 0 * kPointerSize));
+  __ test(ebx, ebx);
+  __ j(not_zero, &has_mapped_parameters, Label::kNear);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+  __ jmp(&instantiate, Label::kNear);
+
+  __ bind(&has_mapped_parameters);
+  __ mov(edi, Operand(edi, Context::SlotOffset(
+                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
+  __ bind(&instantiate);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (smi-tagged)
+  // edi = address of arguments map (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Copy the JS object part.
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+
+  // Set up the callee in-object property.
+  STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
+  __ mov(edi, Operand(esp, 1 * kPointerSize));
+  __ AssertNotSmi(edi);
+  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(ecx);
+  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, edi will point there, otherwise to the
+  // backing store.
+  __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (tagged)
+  // edx = address of receiver argument
+  // edi = address of parameter map or backing store (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Free two registers.
+  __ push(edx);
+  __ push(eax);
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ test(ebx, ebx);
+  __ j(zero, &skip_parameter_map);
+
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
+  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
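+  // (Editor's note) The lea above computes eax = ebx + Smi::FromInt(2)
+  // entirely in smi form: the stored length is the mapped parameter count
+  // plus the two header slots holding the context and the backing store.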
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get the indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
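+  // Worked example (editor's sketch, not in the original): with
+  // parameter_count == 3 and mapped_parameter_count == 2, map slot 0 gets
+  // context index MIN_CONTEXT_SLOTS + 2 and map slot 1 gets
+  // MIN_CONTEXT_SLOTS + 1, matching the reversed storage order above.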
+  Label parameters_loop, parameters_test;
+  __ push(ecx);
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ add(ebx, Operand(esp, 5 * kPointerSize));
+  __ sub(ebx, eax);
+  __ mov(ecx, isolate()->factory()->the_hole_value());
+  __ mov(edx, edi);
+  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+  // eax = loop variable (tagged)
+  // ebx = mapping index (tagged)
+  // ecx = the hole value
+  // edx = address of parameter map (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = argument count (tagged)
+  // esp[4] = address of new object (tagged)
+  // esp[8] = address of receiver argument
+  // esp[12] = mapped parameter count (tagged)
+  // esp[16] = function
+  // esp[20] = parameter count (tagged)
+  __ jmp(&parameters_test, Label::kNear);
+
+  __ bind(&parameters_loop);
+  __ sub(eax, Immediate(Smi::FromInt(1)));
+  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+  __ add(ebx, Immediate(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ test(eax, eax);
+  __ j(not_zero, &parameters_loop, Label::kNear);
+  __ pop(ecx);
+
+  __ bind(&skip_parameter_map);
+
+  // ecx = argument count (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = address of new object (tagged)
+  // esp[4] = address of receiver argument
+  // esp[8] = mapped parameter count (tagged)
+  // esp[12] = function
+  // esp[16] = parameter count (tagged)
+  // Copy arguments header and remaining slots (if there are any).
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(isolate()->factory()->fixed_array_map()));
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+  Label arguments_loop, arguments_test;
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
+  __ jmp(&arguments_test, Label::kNear);
+
+  __ bind(&arguments_loop);
+  __ sub(edx, Immediate(kPointerSize));
+  __ mov(eax, Operand(edx, 0));
+  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+  __ add(ebx, Immediate(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ cmp(ebx, ecx);
+  __ j(less, &arguments_loop, Label::kNear);
+
+  // Restore.
+  __ pop(eax);  // Address of arguments object.
+  __ Drop(4);
+
+  // Return.
+  __ ret(0);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ pop(eax);   // Remove saved mapped parameter count.
+  __ pop(edi);   // Pop saved function.
+  __ pop(eax);   // Remove saved parameter count.
+  __ pop(eax);   // Pop return address.
+  __ push(edi);  // Push function.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ecx);  // Push parameter count.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make edx point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mov(edx, ebp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &arguments_adaptor, Label::kNear);
+  {
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ lea(ebx,
+           Operand(edx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ jmp(&arguments_done, Label::kNear);
+  __ bind(&arguments_adaptor);
+  {
+    __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ lea(ebx,
+           Operand(ebx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- eax    : number of arguments (tagged)
+  //  -- ebx    : pointer to the first argument
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ lea(ecx,
+         Operand(eax, times_half_pointer_size,
+                 JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Setup the elements array in edx.
+  __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+         isolate()->factory()->fixed_array_map());
+  __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+  {
+    Label loop, done_loop;
+    __ Move(ecx, Smi::FromInt(0));
+    __ bind(&loop);
+    __ cmp(ecx, eax);
+    __ j(equal, &done_loop, Label::kNear);
+    __ mov(edi, Operand(ebx, 0 * kPointerSize));
+    __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+                        FixedArray::kHeaderSize),
+           edi);
+    __ sub(ebx, Immediate(1 * kPointerSize));
+    __ add(ecx, Immediate(Smi::FromInt(1)));
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Setup the strict arguments object in edi.
+  __ lea(edi,
+         Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+  __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
+         isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ mov(eax, edi);
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(ecx);
+    __ Push(eax);
+    __ Push(ebx);
+    __ Push(ecx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ mov(edx, eax);
+    __ Pop(ebx);
+    __ Pop(eax);
+  }
+  __ jmp(&done_allocate);
+}
+
+
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = esi;
   Register slot_reg = ebx;
@@ -5470,11 +5693,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- edi                 : callee
   //  -- ebx                 : call_data
@@ -5548,8 +5770,10 @@
   // push return address
   __ push(return_address);
 
-  // load context from callee
-  __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // load context from callee
+    __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+  }
 
   // API function gets reference to the v8::Arguments. If CPU profiler
   // is enabled wrapper function will be called and we need to pass
@@ -5621,7 +5845,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5629,45 +5853,58 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- esp[0]                  : return address
-  //  -- esp[4]                  : name
-  //  -- esp[8 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- esp[0]                        : return address
+  //  -- esp[4]                        : name
+  //  -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- edx                    : api_function_address
+  //  -- edx                           : api_function_address
   // -----------------------------------
   DCHECK(edx.is(ApiGetterDescriptor::function_address()));
 
-  // array for v8::Arguments::values_, handler for name and pointer
-  // to the values (it considered as smi in GC).
-  const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
-  // Allocate space for opional callback address parameter in case
-  // CPU profiler is active.
-  const int kApiArgc = 2 + 1;
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Allocate v8::PropertyCallbackInfo object, arguments for callback and
+  // space for optional callback address parameter (in case CPU profiler is
+  // active) in non-GCed stack space.
+  const int kApiArgc = 3 + 1;
 
   Register api_function_address = edx;
   Register scratch = ebx;
 
-  // load address of name
-  __ lea(scratch, Operand(esp, 1 * kPointerSize));
+  // Load address of v8::PropertyCallbackInfo::args_ array.
+  __ lea(scratch, Operand(esp, 2 * kPointerSize));
 
   PrepareCallApiFunction(masm, kApiArgc);
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+  Operand info_object = ApiParameterOperand(3);
+  __ mov(info_object, scratch);
+
+  __ sub(scratch, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(0), scratch);  // name.
-  __ add(scratch, Immediate(kPointerSize));
+  __ lea(scratch, info_object);
   __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
+  // Reserve space for optional callback address parameter.
+  Operand thunk_last_arg = ApiParameterOperand(2);
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  // +3 is to skip prolog, return address and name handle.
+  Operand return_value_operand(
+      ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           ApiParameterOperand(2), kStackSpace, nullptr,
-                           Operand(ebp, 7 * kPointerSize), NULL);
+                           thunk_last_arg, kStackUnwindSpace, nullptr,
+                           return_value_operand, NULL);
 }
 
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 121d12f..fc813f5 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -274,32 +274,12 @@
     // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
     // will be restored by other means so we don't bother pushing them here.
     void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
-      if (mode == kSaveFPRegs) {
-        masm->sub(esp,
-                  Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
-        // Save all XMM registers except XMM0.
-        for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
-          XMMRegister reg = XMMRegister::from_code(i);
-          masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
-        }
-      }
+      masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
     }
 
-    inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                            SaveFPRegsMode mode) {
-      if (mode == kSaveFPRegs) {
-        // Restore all XMM registers except XMM0.
-        for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
-          XMMRegister reg = XMMRegister::from_code(i);
-          masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
-        }
-        masm->add(esp,
-                  Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
-      }
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+      masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
     }
 
     inline Register object() { return object_; }
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index efe6476..a3756ae 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -169,27 +169,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers ebp and esp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
@@ -207,10 +186,8 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
-  int parameter_count =
-      function->shared()->internal_formal_parameter_count() + 1;
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
+  int parameter_count = shared->internal_formal_parameter_count() + 1;
   unsigned input_frame_size = input_->GetFrameSize();
   unsigned alignment_state_offset =
       input_frame_size - parameter_count * kPointerSize -
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 5a43280..b11ff97 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -28,32 +28,30 @@
   OperandOrder op_order_;
 };
 
-
 static const ByteMnemonic two_operands_instr[] = {
-  {0x01, "add", OPER_REG_OP_ORDER},
-  {0x03, "add", REG_OPER_OP_ORDER},
-  {0x09, "or", OPER_REG_OP_ORDER},
-  {0x0B, "or", REG_OPER_OP_ORDER},
-  {0x1B, "sbb", REG_OPER_OP_ORDER},
-  {0x21, "and", OPER_REG_OP_ORDER},
-  {0x23, "and", REG_OPER_OP_ORDER},
-  {0x29, "sub", OPER_REG_OP_ORDER},
-  {0x2A, "subb", REG_OPER_OP_ORDER},
-  {0x2B, "sub", REG_OPER_OP_ORDER},
-  {0x31, "xor", OPER_REG_OP_ORDER},
-  {0x33, "xor", REG_OPER_OP_ORDER},
-  {0x38, "cmpb", OPER_REG_OP_ORDER},
-  {0x3A, "cmpb", REG_OPER_OP_ORDER},
-  {0x3B, "cmp", REG_OPER_OP_ORDER},
-  {0x84, "test_b", REG_OPER_OP_ORDER},
-  {0x85, "test", REG_OPER_OP_ORDER},
-  {0x87, "xchg", REG_OPER_OP_ORDER},
-  {0x8A, "mov_b", REG_OPER_OP_ORDER},
-  {0x8B, "mov", REG_OPER_OP_ORDER},
-  {0x8D, "lea", REG_OPER_OP_ORDER},
-  {-1, "", UNSET_OP_ORDER}
-};
-
+    {0x01, "add", OPER_REG_OP_ORDER},
+    {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},
+    {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},
+    {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},
+    {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},
+    {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},
+    {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},
+    {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},
+    {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER},
+    {0x87, "xchg", REG_OPER_OP_ORDER},
+    {0x8A, "mov_b", REG_OPER_OP_ORDER},
+    {0x8B, "mov", REG_OPER_OP_ORDER},
+    {0x8D, "lea", REG_OPER_OP_ORDER},
+    {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
@@ -1630,7 +1628,15 @@
             }
           } else if (*data == 0x3A) {
             data++;
-            if (*data == 0x0B) {
+            if (*data == 0x0A) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("roundss %s,%s,%d", NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm), static_cast<int>(imm8));
+              data += 2;
+            } else if (*data == 0x0B) {
               data++;
               int mod, regop, rm;
               get_modrm(*data, &mod, &regop, &rm);
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index ad381c7..b36cf63 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -59,20 +59,6 @@
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return edx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return edi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
-
-
 const Register ApiGetterDescriptor::function_address() { return edx; }
 
 
@@ -101,6 +87,32 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -119,6 +131,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
 
 
@@ -171,13 +187,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ecx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edi};
@@ -413,6 +422,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -424,7 +440,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -436,7 +451,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 5f80b4d..12daec8 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -120,29 +120,62 @@
   Push(isolate()->heap()->root_handle(index));
 }
 
+#define REG(Name) \
+  { Register::kCode_##Name }
 
-void MacroAssembler::InNewSpace(
-    Register object,
-    Register scratch,
-    Condition cc,
-    Label* condition_met,
-    Label::Distance condition_met_distance) {
-  DCHECK(cc == equal || cc == not_equal);
-  if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
-  } else {
-    mov(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
+static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1, Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow, so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
   }
-  // Check that we can use a test_b.
-  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
-  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
-  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
-           | (1 << MemoryChunk::IN_TO_SPACE);
-  // If non-zero, the page belongs to new-space.
-  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-         static_cast<uint8_t>(mask));
-  j(cc, condition_met, condition_met_distance);
+  if (fp_mode == kSaveFPRegs) {
+    sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
+    // Save all XMM registers except XMM0.
+    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
+    }
+  }
+}
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+                                    Register exclusion2, Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    // Restore all XMM registers except XMM0.
+    for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
+    }
+    add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
+  }
+
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
+                                Label* condition_met,
+                                Label::Distance distance) {
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
 }
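
Note: the exclusion filter in PushCallerSaved/PopCallerSaved above must be applied identically in both directions, or the pops would land in the wrong slots. A standalone sketch of that invariant in plain C++ (hypothetical register enum, not part of this patch):

#include <cstdio>
#include <vector>

enum Reg { kEax, kEcx, kEdx, kNoReg };
static const Reg saved_regs[] = {kEax, kEcx, kEdx};
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Reg);

static bool Excluded(Reg r, Reg e1, Reg e2, Reg e3) {
  return r == e1 || r == e2 || r == e3;
}

int main() {
  std::vector<Reg> stack;
  // Push in ascending order, skipping the excluded register(s)...
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    if (!Excluded(saved_regs[i], kEcx, kNoReg, kNoReg)) {
      stack.push_back(saved_regs[i]);
    }
  }
  // ...and pop in exact reverse order with the same exclusions.
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
    if (Excluded(saved_regs[i], kEcx, kNoReg, kNoReg)) continue;
    printf("pop reg %d (expected %d)\n", stack.back(), saved_regs[i]);
    stack.pop_back();
  }
  return 0;
}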
 
 
@@ -571,6 +604,75 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(!js_function.is(code_entry));
+  DCHECK(!js_function.is(scratch));
+  DCHECK(!code_entry.is(scratch));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    Label ok;
+    lea(scratch, FieldOperand(js_function, offset));
+    cmp(code_entry, Operand(scratch, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into the young generation.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+                Label::kNear);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+                Label::kNear);
+
+  // Save input registers.
+  push(js_function);
+  push(code_entry);
+
+  const Register dst = scratch;
+  lea(dst, FieldOperand(js_function, offset));
+
+  // Save caller-saved registers.
+  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count, code_entry);
+  mov(Operand(esp, 0 * kPointerSize), js_function);
+  mov(Operand(esp, 1 * kPointerSize), dst);  // Slot.
+  mov(Operand(esp, 2 * kPointerSize),
+      Immediate(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  // Restore input registers.
+  pop(code_entry);
+  pop(js_function);
+
+  bind(&done);
+}
 
 void MacroAssembler::DebugBreak() {
   Move(eax, Immediate(0));
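
Note: RecordWriteCodeEntryField above only takes the slow C call when neither page-flag check bails out to &done. A standalone sketch of that filtering, with hypothetical flag bit assignments (the real masks live on MemoryChunk):

#include <cstdint>
#include <cstdio>

// Hypothetical bit assignments, for illustration only.
const uint32_t kPointersToHereAreInterestingMask = 1u << 0;
const uint32_t kPointersFromHereAreInterestingMask = 1u << 1;

static bool NeedsCodeEntryBarrier(uint32_t value_page_flags,
                                  uint32_t holder_page_flags) {
  // First CheckPageFlag(..., zero, &done): skip if the bit is clear.
  if ((value_page_flags & kPointersToHereAreInterestingMask) == 0) return false;
  // Second CheckPageFlag(..., zero, &done): skip if the bit is clear.
  if ((holder_page_flags & kPointersFromHereAreInterestingMask) == 0) return false;
  return true;  // fall through to the record-write C function
}

int main() {
  printf("%d\n", NeedsCodeEntryBarrier(kPointersToHereAreInterestingMask, 0));
  printf("%d\n", NeedsCodeEntryBarrier(kPointersToHereAreInterestingMask,
                                       kPointersFromHereAreInterestingMask));
  return 0;
}
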
@@ -587,6 +689,25 @@
 }
 
 
+void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
+  Label msb_set_src;
+  Label jmp_return;
+  test(src, src);
+  j(sign, &msb_set_src, Label::kNear);
+  cvtsi2ss(dst, src);
+  jmp(&jmp_return, Label::kNear);
+  bind(&msb_set_src);
+  mov(tmp, src);
+  shr(src, 1);
+  // Recover the least significant bit to avoid rounding errors.
+  and_(tmp, Immediate(1));
+  or_(src, tmp);
+  cvtsi2ss(dst, src);
+  addss(dst, dst);
+  bind(&jmp_return);
+}
+
+
 bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
   static const int kMaxImmediateBits = 17;
   if (!RelocInfo::IsNone(x.rmode_)) return false;
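
Note: Cvtui2ss above works around cvtsi2ss accepting only signed inputs. When the MSB of src is set, the value is halved with the lost low bit folded back in (so the final rounding is unaffected), converted, then doubled via addss. The same trick in plain C++ (standalone sketch, not part of this patch):

#include <cstdint>
#include <cstdio>

static float Uint32ToFloat(uint32_t src) {
  if (static_cast<int32_t>(src) >= 0) {
    return static_cast<float>(static_cast<int32_t>(src));  // fast path
  }
  // shr by 1, then OR the lost bit back in to keep round-to-nearest exact.
  uint32_t halved = (src >> 1) | (src & 1);
  float result = static_cast<float>(static_cast<int32_t>(halved));
  return result + result;  // addss dst,dst
}

int main() {
  printf("%.1f\n", Uint32ToFloat(0xFFFFFFFFu));  // 4294967296.0
  return 0;
}
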
@@ -833,6 +954,19 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAReceiver);
+    Push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+    Pop(object);
+    Check(above_equal, kOperandIsNotAReceiver);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   if (emit_debug_code()) {
     Label done_checking;
@@ -967,7 +1101,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
   EnterExitFramePrologue();
 
   // Set up argc and argv in callee-saved registers.
@@ -976,7 +1110,7 @@
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
-  EnterExitFrameEpilogue(3, save_doubles);
+  EnterExitFrameEpilogue(argc, save_doubles);
 }
 
 
@@ -1768,13 +1902,13 @@
                                                 Register end_address,
                                                 Register filler) {
   Label loop, entry;
-  jmp(&entry);
+  jmp(&entry, Label::kNear);
   bind(&loop);
   mov(Operand(current_address, 0), filler);
   add(current_address, Immediate(kPointerSize));
   bind(&entry);
   cmp(current_address, end_address);
-  j(below, &loop);
+  j(below, &loop, Label::kNear);
 }
 
 
@@ -1796,9 +1930,9 @@
                                       Label* then_label) {
   Label ok;
   test(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   test(op, op);
-  j(sign, then_label);
+  j(sign, then_label, Label::kNear);
   bind(&ok);
 }
 
@@ -1810,10 +1944,10 @@
                                       Label* then_label) {
   Label ok;
   test(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   mov(scratch, op1);
   or_(scratch, op2);
-  j(sign, then_label);
+  j(sign, then_label, Label::kNear);
   bind(&ok);
 }
 
@@ -2044,7 +2178,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2147,26 +2281,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  GetBuiltinFunction(edi, native_context_index);
-  InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
-                                        int native_context_index) {
-  // Load the JavaScript builtin function from the builtins object.
-  mov(target, NativeContextOperand());
-  mov(target, ContextOperand(target, native_context_index));
-}
-
-
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -2641,9 +2755,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // will not return here
   int3();
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 76c4890..9ebae1f 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -16,6 +16,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_eax};
 const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kReturnRegister2 = {Register::kCode_edi};
 const Register kJSFunctionRegister = {Register::kCode_edi};
 const Register kContextRegister = {Register::kCode_esi};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
@@ -106,6 +107,16 @@
     j(not_equal, if_not_equal, if_not_equal_distance);
   }
 
+  // These functions do not arrange the registers in any particular order, so
+  // they are not useful for calls that can cause a GC.  The caller can
+  // exclude up to 3 registers that do not need to be saved and restored.
+  void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+                       Register exclusion2 = no_reg,
+                       Register exclusion3 = no_reg);
+  void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+                      Register exclusion2 = no_reg,
+                      Register exclusion3 = no_reg);
+
   // ---------------------------------------------------------------------------
   // GC Support
   enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
@@ -206,6 +217,11 @@
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting);
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   // For page containing |object| mark the region covering the object's map
   // dirty. |object| is the object being stored into, |map| is the Map object
   // that was stored.
@@ -225,7 +241,7 @@
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
   // esi.
-  void EnterExitFrame(bool save_doubles);
+  void EnterExitFrame(int argc, bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
@@ -270,6 +286,9 @@
   void StoreToSafepointRegisterSlot(Register dst, Immediate src);
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Nop, because ia32 does not have a root register.
+  void InitializeRootRegister() {}
+
   void LoadHeapObject(Register result, Handle<HeapObject> object);
   void CmpHeapObject(Register reg, Handle<HeapObject> object);
   void PushHeapObject(Handle<HeapObject> object);
@@ -330,13 +349,6 @@
                       const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
-  // Store the function for the given builtin in the target register.
-  void GetBuiltinFunction(Register target, int native_context_index);
-
   // Expression support
   // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
   // hinders register renaming and makes dependence chains longer. So we use
@@ -344,6 +356,8 @@
   void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
   void Cvtsi2sd(XMMRegister dst, const Operand& src);
 
+  void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
+
   // Support for constant splitting.
   bool IsUnsafeImmediate(const Immediate& x);
   void SafeMove(Register dst, const Immediate& x);
@@ -528,6 +542,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object);
@@ -776,6 +793,7 @@
   // Move an immediate into an XMM register.
   void Move(XMMRegister dst, uint32_t src);
   void Move(XMMRegister dst, uint64_t src);
+  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
   void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
 
   void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index e293965..5f2b861 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -286,11 +288,17 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ ldr(data,
-           FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(data,
-           FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ ldr(data,
+             FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ ldr(data,
+             FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ ldr(data,
+             FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ ldr(data,
+             FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -309,7 +317,8 @@
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -394,8 +403,7 @@
   __ b(ne, miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -403,20 +411,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     __ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    Label do_store;
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ b(ne, miss_label);
-        break;
-      }
-      __ b(eq, &do_store);
-    }
-    __ bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ b(ne, miss_label);
   }
 }
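
Note: the simplified check above reflects the new FieldType model that replaces HeapType class lists throughout this patch: a field type is now Any, None, or exactly one class, so the store handler needs a single weak-cell map compare instead of the old iterator loop. The shape of that model in a standalone sketch (stand-in types, not V8 code):

#include <cassert>
#include <cstdio>

class Map {};  // stand-in for v8::internal::Map

class FieldType {
 public:
  static FieldType Any() { return FieldType(nullptr, true); }
  static FieldType None() { return FieldType(nullptr, false); }
  static FieldType Class(Map* map) { return FieldType(map, false); }
  bool IsAny() const { return map_ == nullptr && any_; }
  bool IsClass() const { return map_ != nullptr; }
  Map* AsClass() const { assert(IsClass()); return map_; }
 private:
  FieldType(Map* map, bool any) : map_(map), any_(any) {}
  Map* map_;
  bool any_;
};

int main() {
  Map m;
  FieldType t = FieldType::Class(&m);
  if (t.IsClass()) {
    // One weak-cell map compare replaces the old HeapType iterator loop.
    assert(t.AsClass() == &m);
  }
  printf("class=%d any=%d\n", t.IsClass(), FieldType::Any().IsAny());
  return 0;
}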
 
@@ -594,38 +593,39 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Build AccessorInfo::args_ list on the stack and push property name below
-  // the exit frame to make GC aware of them and store pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  DCHECK(!scratch2().is(reg));
-  DCHECK(!scratch3().is(reg));
-  DCHECK(!scratch4().is(reg));
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
   __ push(receiver());
-  // Push data from ExecutableAccessorInfo.
+  // Push data from AccessorInfo.
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
-    __ Move(scratch3(), data);
+    __ Move(scratch2(), data);
   } else {
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
     // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch3(), cell);
+    __ GetWeakValue(scratch2(), cell);
   }
-  __ push(scratch3());
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ mov(scratch4(), scratch3());
-  __ Push(scratch3(), scratch4());
-  __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch4(), reg);
-  __ mov(scratch2(), sp);  // scratch2 = PropertyAccessorInfo::args_
+  __ push(scratch2());
+  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+  __ Push(scratch2(), scratch2());
+  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch2(), reg);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
   __ push(name());
 
   // Abi for CallApiGetter
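
Note: the reshuffled STATIC_ASSERTs pin down the new v8::PropertyCallbackInfo::args_ layout, which gains a should_throw_on_error slot at index 0 (hence the extra Smi::FromInt(0) push above) and grows from 6 to 7 entries. The same layout as a standalone sketch, indices copied from the asserts:

#include <cstdio>

enum PropertyCallbackArgsLayout {
  kShouldThrowOnErrorIndex = 0,  // the new slot
  kHolderIndex = 1,
  kIsolateIndex = 2,
  kReturnValueDefaultValueIndex = 3,
  kReturnValueOffset = 4,
  kDataIndex = 5,
  kThisIndex = 6,
  kArgsLength = 7
};

int main() {
  static_assert(kArgsLength == kThisIndex + 1, "thisArg stays the last entry");
  printf("args length: %d\n", static_cast<int>(kArgsLength));
  return 0;
}
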
@@ -714,8 +714,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ push(receiver());  // receiver
@@ -732,6 +732,7 @@
   __ push(ip);
   __ mov(ip, Operand(name));
   __ Push(ip, value());
+  __ Push(Smi::FromInt(language_mode));
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -780,7 +781,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r1, r3);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index f59ac07..14ed8b4 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -157,8 +157,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* slow,
-                                  LanguageMode language_mode) {
+                                  Register result, Label* slow) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -215,13 +214,8 @@
   __ jmp(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ jmp(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ jmp(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -264,8 +258,7 @@
   __ bind(&unique);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = r0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -280,7 +273,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -304,7 +297,7 @@
 
   DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);
 
   LoadIC_PushArgs(masm);
 
@@ -312,17 +305,14 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
 
   __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
   __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
@@ -332,7 +322,7 @@
 
   DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);
 
   LoadIC_PushArgs(masm);
 
@@ -340,22 +330,17 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
 
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Perform tail call to the entry.
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in lr.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -379,9 +364,9 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(r0, r3, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow,
-                        language_mode);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
+  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
+                      r3);
   __ Ret();
 
   __ bind(&check_number_dictionary);
@@ -400,9 +385,9 @@
 
   // Slow case, key and receiver still in r2 and r1.
   __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
                       r3);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
@@ -446,8 +431,8 @@
   GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
   // Load the property to r0.
   GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
-                      r3);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+                      r4, r3);
   __ Ret();
 
   __ bind(&index_name);
@@ -793,11 +778,11 @@
 
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 7cfef6a..51ae3b5 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -198,11 +200,17 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ Ldr(data,
-           FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ Ldr(data,
-           FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ Ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ Ldr(data,
+             FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ Ldr(data,
+             FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ Ldr(data,
+             FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ Ldr(data,
+             FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -221,7 +229,8 @@
   __ Mov(api_function_address, ref);
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -358,7 +367,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, x1, x3);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
@@ -441,8 +450,7 @@
   __ B(ne, miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -450,20 +458,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     __ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    Label do_store;
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ B(ne, miss_label);
-        break;
-      }
-      __ B(eq, &do_store);
-    }
-    __ Bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ B(ne, miss_label);
   }
 }
 
@@ -645,19 +644,20 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
   DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
 
-  // Build ExecutableAccessorInfo::args_ list on the stack and push property
-  // name below the exit frame to make GC aware of them and store pointers to
-  // them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make the GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
 
   __ Push(receiver());
 
@@ -673,18 +673,9 @@
   }
   __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
   __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
-
-  Register args_addr = scratch2();
-  __ Add(args_addr, __ StackPointer(), kPointerSize);
-
-  // Stack at this point:
-  //              sp[40] callback data
-  //              sp[32] undefined
-  //              sp[24] undefined
-  //              sp[16] isolate
-  // args_addr -> sp[8]  reg
-  //              sp[0]  name
+  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg);
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
+  __ Push(name());
 
   // Abi for CallApiGetter.
   Register getter_address_reg = x2;
@@ -774,8 +765,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
   Register holder_reg = Frontend(name);
 
@@ -795,6 +786,7 @@
   }
   __ Mov(scratch2(), Operand(name));
   __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+  __ Push(Smi::FromInt(language_mode));
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index eb933c7..726a68e 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -164,8 +164,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* slow,
-                                  LanguageMode language_mode) {
+                                  Register result, Label* slow) {
   DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
 
   Label check_prototypes, check_next_prototype;
@@ -203,13 +202,8 @@
   __ B(&check_next_prototype);
 
   __ Bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ B(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ B(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ B(&done);
 
   __ Bind(&in_bounds);
   // Fast case: Do the load.
@@ -260,8 +254,7 @@
   // Fall through if the key is a unique name.
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = x0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -275,7 +268,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ Bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -286,7 +279,7 @@
 
   DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
 
   // Perform tail call to the entry.
   __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
@@ -296,15 +289,12 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
@@ -314,7 +304,7 @@
 
   DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
 
   __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
           LoadWithVectorDescriptor::NameRegister(),
@@ -325,24 +315,19 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
 static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
-                                        Label* slow,
-                                        LanguageMode language_mode) {
+                                        Label* slow) {
   DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                      scratch5));
 
@@ -358,8 +343,8 @@
   __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
 
   GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
-                        result, slow, language_mode);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+                        result, slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
                       scratch1, scratch2);
   __ Ret();
 
@@ -424,14 +409,12 @@
   GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
   // Load the property.
   GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
                       scratch1, scratch2);
   __ Ret();
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in lr.
   Label slow, check_name, index_smi, index_name;
 
@@ -444,14 +427,13 @@
   __ Bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
-  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow,
-                              language_mode);
+  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
 
   // Slow case.
   __ Bind(&slow);
-  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
-                      x4, x3);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
+                      1, x4, x3);
+  GenerateRuntimeGetProperty(masm);
 
   __ Bind(&check_name);
   GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
@@ -783,12 +765,12 @@
 
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
   __ Ret();
 
   // Cache miss: Jump to runtime.
   __ Bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/call-optimization.cc b/src/ic/call-optimization.cc
index 45717b5..571b614 100644
--- a/src/ic/call-optimization.cc
+++ b/src/ic/call-optimization.cc
@@ -8,8 +8,16 @@
 namespace v8 {
 namespace internal {
 
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
-  Initialize(function);
+CallOptimization::CallOptimization(Handle<Object> function) {
+  constant_function_ = Handle<JSFunction>::null();
+  is_simple_api_call_ = false;
+  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+  api_call_info_ = Handle<CallHandlerInfo>::null();
+  if (function->IsJSFunction()) {
+    Initialize(Handle<JSFunction>::cast(function));
+  } else if (function->IsFunctionTemplateInfo()) {
+    Initialize(Handle<FunctionTemplateInfo>::cast(function));
+  }
 }
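
Note: CallOptimization now accepts a generic handle and dispatches on the concrete kind, so API accessors backed directly by a FunctionTemplateInfo (with no instantiated JSFunction) can take the fast path too. The dispatch shape in a standalone sketch (stand-in types and RTTI instead of V8's type predicates):

#include <cstdio>

struct Object { virtual ~Object() = default; };
struct JSFunction : Object {};
struct FunctionTemplateInfo : Object {};

struct CallOptimization {
  bool is_constant_call = false;
  bool is_simple_api_call = false;
  explicit CallOptimization(const Object& function) {
    if (dynamic_cast<const JSFunction*>(&function) != nullptr) {
      is_constant_call = true;  // Initialize(Handle<JSFunction>) path
    } else if (dynamic_cast<const FunctionTemplateInfo*>(&function) != nullptr) {
      is_simple_api_call = true;  // Initialize(Handle<FunctionTemplateInfo>) path
    }
    // Anything else leaves both flags clear: no optimization applies.
  }
};

int main() {
  JSFunction fn;
  FunctionTemplateInfo info;
  printf("%d %d\n", CallOptimization(fn).is_constant_call,
         CallOptimization(info).is_simple_api_call);
  return 0;
}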
 
 
@@ -27,9 +35,8 @@
     return Handle<JSObject>::null();
   }
   for (int depth = 1; true; depth++) {
-    if (!object_map->prototype()->IsJSObject()) break;
+    if (!object_map->has_hidden_prototype()) break;
     Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
-    if (!prototype->map()->is_hidden_prototype()) break;
     object_map = handle(prototype->map());
     if (expected_receiver_type_->IsTemplateFor(*object_map)) {
       *holder_lookup = kHolderFound;
@@ -80,13 +87,20 @@
   return false;
 }
 
+void CallOptimization::Initialize(
+    Handle<FunctionTemplateInfo> function_template_info) {
+  if (function_template_info->call_code()->IsUndefined()) return;
+  api_call_info_ =
+      handle(CallHandlerInfo::cast(function_template_info->call_code()));
+
+  if (!function_template_info->signature()->IsUndefined()) {
+    expected_receiver_type_ =
+        handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+  }
+  is_simple_api_call_ = true;
+}
 
 void CallOptimization::Initialize(Handle<JSFunction> function) {
-  constant_function_ = Handle<JSFunction>::null();
-  is_simple_api_call_ = false;
-  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
-  api_call_info_ = Handle<CallHandlerInfo>::null();
-
   if (function.is_null() || !function->is_compiled()) return;
 
   constant_function_ = function;
diff --git a/src/ic/call-optimization.h b/src/ic/call-optimization.h
index 7963d1c..efabd33 100644
--- a/src/ic/call-optimization.h
+++ b/src/ic/call-optimization.h
@@ -15,7 +15,7 @@
 // Holds information about possible function call optimizations.
 class CallOptimization BASE_EMBEDDED {
  public:
-  explicit CallOptimization(Handle<JSFunction> function);
+  explicit CallOptimization(Handle<Object> function);
 
   bool is_constant_call() const { return !constant_function_.is_null(); }
 
@@ -51,6 +51,7 @@
 
  private:
   void Initialize(Handle<JSFunction> function);
+  void Initialize(Handle<FunctionTemplateInfo> function_template_info);
 
   // Determines whether the given function can be called using the
   // fast api call builtin.
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index b353628..803281e 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -4,9 +4,10 @@
 
 #include "src/ic/handler-compiler.h"
 
+#include "src/field-type.h"
 #include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
 #include "src/ic/ic-inl.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/profiler/cpu-profiler.h"
 
@@ -56,11 +57,7 @@
     if (name->IsPrivate()) {
       // TODO(verwaest): Use nonexistent_private_symbol.
       cache_name = name;
-      JSReceiver* prototype = JSReceiver::cast(current_map->prototype());
-      if (!prototype->map()->is_hidden_prototype() &&
-          !prototype->map()->IsJSGlobalObjectMap()) {
-        break;
-      }
+      if (!current_map->has_hidden_prototype()) break;
     }
 
     last = handle(JSObject::cast(current_map->prototype()));
@@ -228,7 +225,7 @@
 
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
-    Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
+    Handle<Name> name, Handle<AccessorInfo> callback) {
   Register reg = Frontend(name);
   GenerateLoadCallback(reg, callback);
   return GetCode(kind(), Code::FAST, name);
@@ -278,7 +275,7 @@
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
     LookupIterator* it) {
   // So far the most popular follow ups for interceptor loads are DATA and
-  // ExecutableAccessorInfo, so inline only them. Other cases may be added
+  // AccessorInfo, so inline only them. Other cases may be added
   // later.
   bool inline_followup = false;
   switch (it->state()) {
@@ -296,20 +293,20 @@
       break;
     case LookupIterator::ACCESSOR: {
       Handle<Object> accessors = it->GetAccessors();
-      if (accessors->IsExecutableAccessorInfo()) {
-        Handle<ExecutableAccessorInfo> info =
-            Handle<ExecutableAccessorInfo>::cast(accessors);
-        inline_followup = info->getter() != NULL &&
-                          ExecutableAccessorInfo::IsCompatibleReceiverMap(
-                              isolate(), info, map());
+      if (accessors->IsAccessorInfo()) {
+        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+        inline_followup =
+            info->getter() != NULL &&
+            AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map());
       } else if (accessors->IsAccessorPair()) {
         Handle<JSObject> property_holder(it->GetHolder<JSObject>());
         Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
                               isolate());
-        if (!getter->IsJSFunction()) break;
+        if (!(getter->IsJSFunction() || getter->IsFunctionTemplateInfo())) {
+          break;
+        }
         if (!property_holder->HasFastProperties()) break;
-        auto function = Handle<JSFunction>::cast(getter);
-        CallOptimization call_optimization(function);
+        CallOptimization call_optimization(getter);
         Handle<Map> receiver_map = map();
         inline_followup = call_optimization.is_simple_api_call() &&
                           call_optimization.IsCompatibleReceiverMap(
@@ -396,14 +393,14 @@
       break;
     }
     case LookupIterator::ACCESSOR:
-      if (it->GetAccessors()->IsExecutableAccessorInfo()) {
-        Handle<ExecutableAccessorInfo> info =
-            Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+      if (it->GetAccessors()->IsAccessorInfo()) {
+        Handle<AccessorInfo> info =
+            Handle<AccessorInfo>::cast(it->GetAccessors());
         DCHECK_NOT_NULL(info->getter());
         GenerateLoadCallback(reg, info);
       } else {
-        auto function = handle(JSFunction::cast(
-            AccessorPair::cast(*it->GetAccessors())->getter()));
+        Handle<Object> function = handle(
+            AccessorPair::cast(*it->GetAccessors())->getter(), isolate());
         CallOptimization call_optimization(function);
         GenerateApiAccessorCall(masm(), call_optimization, holder_map,
                                 receiver(), scratch2(), false, no_reg, reg,
@@ -437,8 +434,9 @@
     PrototypeIterator::WhereToEnd end =
         name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
                           : PrototypeIterator::END_AT_NULL;
-    PrototypeIterator iter(isolate(), holder());
-    while (!iter.IsAtEnd(end)) {
+    PrototypeIterator iter(isolate(), holder(),
+                           PrototypeIterator::START_AT_PROTOTYPE, end);
+    while (!iter.IsAtEnd()) {
       last = PrototypeIterator::GetCurrent<JSObject>(iter);
       iter.Advance();
     }
@@ -510,10 +508,9 @@
   return GetCode(kind(), Code::FAST, name);
 }
 
-
 bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
-    HeapType* field_type) const {
-  return !field_type->Classes().Done();
+    FieldType* field_type) const {
+  return field_type->IsClass();
 }
 
 
@@ -521,7 +518,7 @@
   Label miss;
   DCHECK(it->representation().IsHeapObject());
 
-  HeapType* field_type = *it->GetFieldType();
+  FieldType* field_type = *it->GetFieldType();
   bool need_save_restore = false;
   if (RequiresFieldTypeChecks(field_type)) {
     need_save_restore = IC::ICUseVector(kind());
@@ -564,10 +561,8 @@
 
 #undef __
 
-
 void ElementHandlerCompiler::CompileElementHandlers(
-    MapHandleList* receiver_maps, CodeHandleList* handlers,
-    LanguageMode language_mode) {
+    MapHandleList* receiver_maps, CodeHandleList* handlers) {
   for (int i = 0; i < receiver_maps->length(); ++i) {
     Handle<Map> receiver_map = receiver_maps->at(i);
     Handle<Code> cached_stub;
@@ -575,9 +570,7 @@
     if (receiver_map->IsStringMap()) {
       cached_stub = LoadIndexedStringStub(isolate()).GetCode();
     } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
-      cached_stub = is_strong(language_mode)
-                        ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
-                        : isolate()->builtins()->KeyedLoadIC_Slow();
+      cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
     } else {
       bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
       ElementsKind elements_kind = receiver_map->elements_kind();
@@ -586,9 +579,7 @@
       // generated stub code needs to check that dynamically anyway.
       bool convert_hole_to_undefined =
           (is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
-           *receiver_map ==
-               isolate()->get_initial_js_array_map(elements_kind)) &&
-          !is_strong(language_mode);
+           *receiver_map == isolate()->get_initial_js_array_map(elements_kind));
 
       if (receiver_map->has_indexed_interceptor()) {
         cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
@@ -600,9 +591,7 @@
                                           convert_hole_to_undefined).GetCode();
       } else {
         DCHECK(elements_kind == DICTIONARY_ELEMENTS);
-        LoadICState state =
-            LoadICState(is_strong(language_mode) ? LoadICState::kStrongModeState
-                                                 : kNoExtraICState);
+        LoadICState state = LoadICState(kNoExtraICState);
         cached_stub = LoadDictionaryElementStub(isolate(), state).GetCode();
       }
     }
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index fe59210..45d7d73 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -123,7 +123,7 @@
   Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
 
   Handle<Code> CompileLoadCallback(Handle<Name> name,
-                                   Handle<ExecutableAccessorInfo> callback);
+                                   Handle<AccessorInfo> callback);
 
   Handle<Code> CompileLoadCallback(Handle<Name> name,
                                    const CallOptimization& call_optimization,
@@ -180,8 +180,7 @@
  private:
   Handle<Code> CompileLoadNonexistent(Handle<Name> name);
   void GenerateLoadConstant(Handle<Object> value);
-  void GenerateLoadCallback(Register reg,
-                            Handle<ExecutableAccessorInfo> callback);
+  void GenerateLoadCallback(Register reg, Handle<AccessorInfo> callback);
   void GenerateLoadCallback(const CallOptimization& call_optimization,
                             Handle<Map> receiver_map);
 
@@ -224,7 +223,8 @@
                                       Handle<Name> name);
   Handle<Code> CompileStoreField(LookupIterator* it);
   Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
-                                    Handle<ExecutableAccessorInfo> callback);
+                                    Handle<AccessorInfo> callback,
+                                    LanguageMode language_mode);
   Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
                                     const CallOptimization& call_optimization,
                                     int accessor_index);
@@ -265,8 +265,8 @@
                              Register value_reg, Register scratch,
                              Label* miss_label);
 
-  bool RequiresFieldTypeChecks(HeapType* field_type) const;
-  void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
+  bool RequiresFieldTypeChecks(FieldType* field_type) const;
+  void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
                                Label* miss_label);
 
   static Builtins::Name SlowBuiltin(Code::Kind kind) {
@@ -295,8 +295,7 @@
   virtual ~ElementHandlerCompiler() {}
 
   void CompileElementHandlers(MapHandleList* receiver_maps,
-                              CodeHandleList* handlers,
-                              LanguageMode language_mode);
+                              CodeHandleList* handlers);
 
   static void GenerateStoreSlow(MacroAssembler* masm);
 };
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 0b380b3..3bdddf9 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -197,9 +199,13 @@
     call_data_undefined = true;
     __ mov(data, Immediate(isolate->factory()->undefined_value()));
   } else {
-    __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -214,7 +220,8 @@
   __ mov(api_function_address, Immediate(function_address));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -399,8 +406,7 @@
   __ j(not_equal, miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -408,20 +414,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
-    Label do_store;
+  if (field_type->IsClass()) {
     __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ j(not_equal, miss_label);
-        break;
-      }
-      __ j(equal, &do_store, Label::kNear);
-    }
-    __ bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ j(not_equal, miss_label);
   }
 }
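
The rewritten GenerateFieldTypeChecks above shows the payoff of FieldType: the old HeapType could hold several candidate classes, forcing a loop of CmpWeakValue comparisons with careful branch placement, while FieldType is either Any or exactly one class. A standalone model of the new invariant (types here are illustrative stand-ins):

    #include <cassert>

    struct Map {};

    // Models V8's FieldType, which has exactly two cases: Any, or one class.
    struct FieldType {
      const Map* clazz;  // nullptr means Any
      bool IsClass() const { return clazz != nullptr; }
      const Map* AsClass() const { return clazz; }
    };

    // Mirrors the emitted check: Any-typed fields always pass (no code is
    // emitted at all); a class-typed field needs one exact map comparison.
    bool FieldTypeCheckPasses(const FieldType& t, const Map* value_map) {
      if (!t.IsClass()) return true;    // Any: nothing to verify
      return value_map == t.AsClass();  // single CmpWeakValue + branch
    }

    int main() {
      Map m1, m2;
      FieldType any{nullptr}, cls{&m1};
      assert(FieldTypeCheckPasses(any, &m2));
      assert(FieldTypeCheckPasses(cls, &m1));
      assert(!FieldTypeCheckPasses(cls, &m2));
      return 0;
    }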
 
@@ -593,24 +590,30 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), reg));
+
   // Insert additional parameters into the stack frame above return address.
-  DCHECK(!scratch3().is(reg));
   __ pop(scratch3());  // Get return address to place it below.
 
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
+  // property name below the exit frame so the GC is aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
   __ push(receiver());  // receiver
-  // Push data from ExecutableAccessorInfo.
+  // Push data from AccessorInfo.
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
     __ push(Immediate(data));
   } else {
-    DCHECK(!scratch2().is(reg));
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
@@ -623,13 +626,9 @@
   __ push(Immediate(isolate()->factory()->undefined_value()));
   __ push(Immediate(reinterpret_cast<int>(isolate())));
   __ push(reg);  // holder
-
-  // Save a pointer to where we pushed the arguments. This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
-  __ push(esp);
+  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
 
   __ push(name());  // name
-
   __ push(scratch3());  // Restore return address.
 
   // Abi for CallApiGetter
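
The STATIC_ASSERTs pin the new v8::PropertyCallbackInfo::args_ layout, with should_throw_on_error inserted at index 0 and every other slot shifted up by one. Restated as a standalone enum (indices copied from the asserts; the surrounding scaffolding is illustrative):

    // Indices into the args_ array, per the STATIC_ASSERTs in the hunk.
    enum PropertyCallbackArgIndex {
      kShouldThrowOnErrorIndex = 0,  // new in this layout
      kHolderIndex = 1,
      kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3,
      kReturnValueOffset = 4,
      kDataIndex = 5,
      kThisIndex = 6,
      kArgsLength = 7,
    };

    // Stack grows down on ia32, so the generated code materializes this
    // array by pushing in reverse index order (highest index first), then
    // pushes the property name below the frame so the GC can see it.
    static_assert(kThisIndex + 1 == kArgsLength,
                  "the receiver ('this') is pushed first, so it sits highest");

    int main() { return kArgsLength == 7 ? 0 : 1; }
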
@@ -731,8 +730,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ pop(scratch1());  // remove the return address
@@ -748,6 +747,7 @@
   }
   __ Push(name);
   __ push(value());
+  __ push(Immediate(Smi::FromInt(language_mode)));
   __ push(scratch1());  // restore return address
 
   // Do tail-call to the runtime system.
@@ -802,7 +802,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
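
The store-callback handler now pushes the language mode as a smi-tagged immediate; the runtime side (visible further down, where CONVERT_LANGUAGE_MODE_ARG_CHECKED decodes argument 5) unwraps it. A minimal model of 31-bit smi tagging as used on ia32, assuming a 1-bit tag with tag value 0 (illustrative, not V8's Smi class):

    #include <cassert>
    #include <cstdint>

    // Low bit is the tag (0 for smi); the payload lives in the upper bits.
    int32_t SmiFromInt(int32_t value) { return value << 1; }
    int32_t SmiToInt(int32_t smi) { return smi >> 1; }

    enum LanguageMode { SLOPPY = 0, STRICT = 1 };

    int main() {
      // What push(Immediate(Smi::FromInt(language_mode))) puts on the
      // stack, and what the runtime recovers on the other side.
      int32_t pushed = SmiFromInt(STRICT);
      assert(SmiToInt(pushed) == STRICT);
      return 0;
    }
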
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 88947e4..0eba427 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -167,7 +167,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register scratch,
                                   Register scratch2, Register result,
-                                  Label* slow, LanguageMode language_mode) {
+                                  Label* slow) {
   // Register use:
   //   receiver - holds the receiver and is unchanged.
   //   key - holds the key and is unchanged (must be a smi).
@@ -211,13 +211,8 @@
   __ jmp(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ jmp(slow);
-  } else {
-    __ mov(result, masm->isolate()->factory()->undefined_value());
-    __ jmp(&done);
-  }
+  __ mov(result, masm->isolate()->factory()->undefined_value());
+  __ jmp(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -262,9 +257,7 @@
   __ bind(&unique);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is on the stack.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -286,11 +279,10 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(eax, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
-                        language_mode);
+  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
   Isolate* isolate = masm->isolate();
   Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
   __ ret(0);
 
   __ bind(&check_number_dictionary);
@@ -318,8 +310,8 @@
 
   __ bind(&slow);
   // Slow case: jump to runtime.
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -363,7 +355,7 @@
   GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
 
   GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
   __ ret(0);
 
   __ bind(&index_name);
@@ -628,8 +620,7 @@
   GenerateMiss(masm);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = eax;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -644,7 +635,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -668,16 +659,14 @@
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -689,14 +678,13 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
 
   LoadIC_PushArgs(masm);
 
@@ -704,9 +692,7 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -718,8 +704,7 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
 
@@ -777,14 +762,14 @@
                           receiver, edi);
   __ Drop(3);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
   __ ret(0);
 
   __ bind(&restore_miss);
   __ pop(slot);
   __ pop(vector);
   __ pop(receiver);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
   GenerateMiss(masm);
 }
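
With strong mode gone, GenerateFastArrayLoad no longer needs a LanguageMode parameter: an absent index now always yields undefined instead of conditionally jumping to the slow path to throw. The contract in plain C++, with std::optional standing in for undefined and the prototype walk elided:

    #include <cassert>
    #include <optional>
    #include <vector>

    // Models the fast-array-load contract after the change: an index past
    // the backing store yields undefined (nullopt) rather than deferring
    // to a slow path that could throw.
    std::optional<int> FastArrayLoad(const std::vector<int>& elements,
                                     size_t index) {
      if (index >= elements.size()) return std::nullopt;  // &absent
      return elements[index];                             // &in_bounds
    }

    int main() {
      std::vector<int> a{10, 20};
      assert(FastArrayLoad(a, 1) == 20);
      assert(!FastArrayLoad(a, 5).has_value());  // undefined, no throw
      return 0;
    }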
 
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index ae4b2a5..f74c69e 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -43,13 +43,11 @@
   // stub code needs to check that dynamically anyway.
   bool convert_hole_to_undefined =
       is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
-      *receiver_map == isolate->get_initial_js_array_map(elements_kind) &&
-      !(is_strong(LoadICState::GetLanguageMode(extra_ic_state)));
+      *receiver_map == isolate->get_initial_js_array_map(elements_kind);
   Handle<Code> stub;
   if (receiver_map->has_indexed_interceptor()) {
     stub = LoadIndexedInterceptorStub(isolate).GetCode();
   } else if (receiver_map->IsStringMap()) {
-    // We have a string.
     stub = LoadIndexedStringStub(isolate).GetCode();
   } else if (receiver_map->has_sloppy_arguments_elements()) {
     stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
@@ -58,6 +56,7 @@
     stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
                                convert_hole_to_undefined).GetCode();
   } else {
+    DCHECK(receiver_map->has_dictionary_elements());
     stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
                .GetCode();
   }
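
The added DCHECK records that the final arm of this stub-selection ladder is only reachable for dictionary elements. The ladder, condensed (stub names as plain strings for illustration):

    #include <cassert>
    #include <string>

    enum ElementsKind { INTERCEPTOR, STRING_MAP, SLOPPY_ARGS, FAST, DICTIONARY };

    std::string ChooseLoadStub(ElementsKind kind) {
      if (kind == INTERCEPTOR) return "LoadIndexedInterceptorStub";
      if (kind == STRING_MAP) return "LoadIndexedStringStub";
      if (kind == SLOPPY_ARGS) return "KeyedLoadSloppyArgumentsStub";
      if (kind == FAST) return "LoadFastElementStub";
      assert(kind == DICTIONARY);  // mirrors the added DCHECK
      return "LoadDictionaryElementStub";
    }

    int main() {
      assert(ChooseLoadStub(DICTIONARY) == "LoadDictionaryElementStub");
      return 0;
    }
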
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 6dab006..998bd8c 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -95,9 +95,6 @@
   // The contextual mode must be preserved across IC patching.
   DCHECK(LoadICState::GetTypeofMode(code->extra_ic_state()) ==
          LoadICState::GetTypeofMode(target()->extra_ic_state()));
-  // Strongness must be preserved across IC patching.
-  DCHECK(LoadICState::GetLanguageMode(code->extra_ic_state()) ==
-         LoadICState::GetLanguageMode(target()->extra_ic_state()));
 
   IC::set_target(code);
 }
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index 4bdaf3f..933803c 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -37,7 +37,6 @@
       isolate_(isolate) {
   op_ =
       static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
-  strong_ = StrengthField::decode(extra_ic_state);
   left_kind_ = LeftKindField::decode(extra_ic_state);
   right_kind_ = fixed_right_arg_.IsJust()
                     ? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
@@ -51,7 +50,7 @@
 ExtraICState BinaryOpICState::GetExtraICState() const {
   ExtraICState extra_ic_state =
       OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
-      ResultKindField::encode(result_kind_) | StrengthField::encode(strong_) |
+      ResultKindField::encode(result_kind_) |
       HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
   if (fixed_right_arg_.IsJust()) {
     extra_ic_state = FixedRightArgValueField::update(
@@ -72,7 +71,7 @@
 // Generated list of commonly used stubs
 #define GENERATE(op, left_kind, right_kind, result_kind) \
   do {                                                   \
-    BinaryOpICState state(isolate, op, Strength::WEAK);  \
+    BinaryOpICState state(isolate, op);                  \
     state.left_kind_ = left_kind;                        \
     state.fixed_right_arg_ = Nothing<int>();             \
     state.right_kind_ = right_kind;                      \
@@ -174,7 +173,7 @@
 #undef GENERATE
 #define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
   do {                                                              \
-    BinaryOpICState state(isolate, op, Strength::WEAK);             \
+    BinaryOpICState state(isolate, op);                             \
     state.left_kind_ = left_kind;                                   \
     state.fixed_right_arg_ = Just(fixed_right_arg_value);           \
     state.right_kind_ = SMI;                                        \
@@ -208,7 +207,6 @@
 std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
   os << "(" << Token::Name(s.op_);
   if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
-  if (is_strong(s.strength())) os << "_Strong";
   os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
   if (s.fixed_right_arg_.IsJust()) {
     os << s.fixed_right_arg_.FromJust();
@@ -371,25 +369,25 @@
 Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
   switch (state) {
     case UNINITIALIZED:
-      return Type::None(zone);
+      return Type::None();
     case BOOLEAN:
-      return Type::Boolean(zone);
+      return Type::Boolean();
     case SMI:
-      return Type::SignedSmall(zone);
+      return Type::SignedSmall();
     case NUMBER:
-      return Type::Number(zone);
+      return Type::Number();
     case STRING:
-      return Type::String(zone);
+      return Type::String();
     case INTERNALIZED_STRING:
-      return Type::InternalizedString(zone);
+      return Type::InternalizedString();
     case UNIQUE_NAME:
-      return Type::UniqueName(zone);
+      return Type::UniqueName();
     case RECEIVER:
-      return Type::Receiver(zone);
+      return Type::Receiver();
     case KNOWN_RECEIVER:
-      return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+      return map.is_null() ? Type::Receiver() : Type::Class(map, zone);
     case GENERIC:
-      return Type::Any(zone);
+      return Type::Any();
   }
   UNREACHABLE();
   return NULL;
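
StateToType now calls the zone-free Type constructors; only Type::Class still takes the zone, since it must capture a specific map. A sketch of the mapping with the allocator dropped from the simple lattice points (stand-in types):

    #include <cassert>
    #include <cstring>

    enum State { kUninitialized, kBoolean, kSmi, kNumber, kString, kGeneric };

    // The simple lattice points are effectively constants and no longer
    // need an allocator. (KNOWN_RECEIVER, omitted here, still needs the
    // zone because Type::Class captures a specific map.)
    const char* StateToType(State s) {
      switch (s) {
        case kUninitialized: return "None";
        case kBoolean:       return "Boolean";
        case kSmi:           return "SignedSmall";
        case kNumber:        return "Number";
        case kString:        return "String";
        case kGeneric:       return "Any";
      }
      return nullptr;
    }

    int main() {
      assert(std::strcmp(StateToType(kSmi), "SignedSmall") == 0);
      return 0;
    }
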
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index 1982fbe..e1d33f8 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -25,9 +25,11 @@
  public:
   explicit CallICState(ExtraICState extra_ic_state)
       : bit_field_(extra_ic_state) {}
-  CallICState(int argc, ConvertReceiverMode convert_mode)
+  CallICState(int argc, ConvertReceiverMode convert_mode,
+              TailCallMode tail_call_mode)
       : bit_field_(ArgcBits::encode(argc) |
-                   ConvertModeBits::encode(convert_mode)) {}
+                   ConvertModeBits::encode(convert_mode) |
+                   TailCallModeBits::encode(tail_call_mode)) {}
 
   ExtraICState GetExtraICState() const { return bit_field_; }
 
@@ -39,11 +41,14 @@
   ConvertReceiverMode convert_mode() const {
     return ConvertModeBits::decode(bit_field_);
   }
+  TailCallMode tail_call_mode() const {
+    return TailCallModeBits::decode(bit_field_);
+  }
 
  private:
   typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
-  typedef BitField<ConvertReceiverMode, Code::kArgumentsBits, 2>
-      ConvertModeBits;
+  typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
+  typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
 
   int const bit_field_;
 };
@@ -55,9 +60,8 @@
 class BinaryOpICState final BASE_EMBEDDED {
  public:
   BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
-  BinaryOpICState(Isolate* isolate, Token::Value op, Strength strength)
+  BinaryOpICState(Isolate* isolate, Token::Value op)
       : op_(op),
-        strong_(is_strong(strength)),
         left_kind_(NONE),
         right_kind_(NONE),
         result_kind_(NONE),
@@ -104,10 +108,6 @@
     return Max(left_kind_, right_kind_) == GENERIC;
   }
 
-  Strength strength() const {
-    return strong_ ? Strength::STRONG : Strength::WEAK;
-  }
-
   // Returns true if the IC should enable the inline smi code (i.e. if either
   // parameter may be a smi).
   bool UseInlinedSmiCode() const {
@@ -146,15 +146,13 @@
   class OpField : public BitField<int, 0, 4> {};
   class ResultKindField : public BitField<Kind, 4, 3> {};
   class LeftKindField : public BitField<Kind, 7, 3> {};
-  class StrengthField : public BitField<bool, 10, 1> {};
   // When fixed right arg is set, we don't need to store the right kind.
   // Thus the two fields can overlap.
-  class HasFixedRightArgField : public BitField<bool, 11, 1> {};
-  class FixedRightArgValueField : public BitField<int, 12, 4> {};
-  class RightKindField : public BitField<Kind, 12, 3> {};
+  class HasFixedRightArgField : public BitField<bool, 10, 1> {};
+  class FixedRightArgValueField : public BitField<int, 11, 4> {};
+  class RightKindField : public BitField<Kind, 11, 3> {};
 
   Token::Value op_;
-  bool strong_;
   Kind left_kind_;
   Kind right_kind_;
   Kind result_kind_;
@@ -204,38 +202,24 @@
 class LoadICState final BASE_EMBEDDED {
  private:
   class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
-  class LanguageModeBits
-      : public BitField<LanguageMode, TypeofModeBits::kNext, 2> {};
   STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
   const ExtraICState state_;
 
  public:
-  static const uint32_t kNextBitFieldOffset = LanguageModeBits::kNext;
-
-  static const ExtraICState kStrongModeState = STRONG
-                                               << LanguageModeBits::kShift;
+  static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
 
   explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
 
-  explicit LoadICState(TypeofMode typeof_mode, LanguageMode language_mode)
-      : state_(TypeofModeBits::encode(typeof_mode) |
-               LanguageModeBits::encode(language_mode)) {}
+  explicit LoadICState(TypeofMode typeof_mode)
+      : state_(TypeofModeBits::encode(typeof_mode)) {}
 
   ExtraICState GetExtraICState() const { return state_; }
 
   TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
 
-  LanguageMode language_mode() const {
-    return LanguageModeBits::decode(state_);
-  }
-
   static TypeofMode GetTypeofMode(ExtraICState state) {
     return LoadICState(state).typeof_mode();
   }
-
-  static LanguageMode GetLanguageMode(ExtraICState state) {
-    return LoadICState(state).language_mode();
-  }
 };
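
Two bit-layout changes above deserve a closer look: CallICState now chains its fields through kNext instead of hard-coded offsets, making room for TailCallModeBits, and BinaryOpICState's fields slide down one bit once StrengthField (bit 10) disappears. A minimal BitField modeled on V8's, showing the chaining (illustrative re-implementation):

    #include <cassert>
    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitField {
      static const int kNext = kShift + kSize;  // enables chaining
      static uint32_t encode(T v) {
        return static_cast<uint32_t>(v) << kShift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits >> kShift) & ((1u << kSize) - 1));
      }
    };

    enum ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum TailCallMode { kDisallow, kAllow };

    // Chained layout, as in the new CallICState: each field starts at the
    // previous field's kNext, so adding a field cannot silently overlap.
    typedef BitField<int, 0, 8> ArgcBits;  // width is illustrative
    typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
    typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;

    int main() {
      uint32_t state = ArgcBits::encode(3) | ConvertModeBits::encode(kAny) |
                       TailCallModeBits::encode(kAllow);
      assert(ArgcBits::decode(state) == 3);
      assert(ConvertModeBits::decode(state) == kAny);
      assert(TailCallModeBits::decode(state) == kAllow);
      return 0;
    }
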
 
 
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 73ac666..c0b3e49 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -11,6 +11,7 @@
 #include "src/codegen.h"
 #include "src/conversions.h"
 #include "src/execution.h"
+#include "src/field-type.h"
 #include "src/frames-inl.h"
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
@@ -21,6 +22,8 @@
 #include "src/macro-assembler.h"
 #include "src/prototype.h"
 #include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -545,8 +548,7 @@
   CompareICStub stub(target->stub_key(), isolate);
   // Only clear CompareICs that can retain objects.
   if (stub.state() != CompareICState::KNOWN_RECEIVER) return;
-  SetTargetAtAddress(address,
-                     GetRawUninitialized(isolate, stub.op(), stub.strength()),
+  SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op()),
                      constant_pool);
   PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
 }
@@ -558,9 +560,7 @@
   if (FLAG_compiled_keyed_generic_loads) {
     return KeyedLoadGenericStub(isolate, LoadICState(extra_state)).GetCode();
   } else {
-    return is_strong(LoadICState::GetLanguageMode(extra_state))
-               ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
-               : isolate->builtins()->KeyedLoadIC_Megamorphic();
+    return isolate->builtins()->KeyedLoadIC_Megamorphic();
   }
 }
 
@@ -573,13 +573,21 @@
   return true;
 }
 
-
-void IC::ConfigureVectorState(IC::State new_state) {
+void IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
   DCHECK(UseVector());
   if (new_state == PREMONOMORPHIC) {
     nexus()->ConfigurePremonomorphic();
   } else if (new_state == MEGAMORPHIC) {
-    nexus()->ConfigureMegamorphic();
+    if (kind() == Code::LOAD_IC || kind() == Code::STORE_IC) {
+      nexus()->ConfigureMegamorphic();
+    } else if (kind() == Code::KEYED_LOAD_IC) {
+      KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+      nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
+    } else {
+      DCHECK(kind() == Code::KEYED_STORE_IC);
+      KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+      nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
+    }
   } else {
     UNREACHABLE();
   }
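
ConfigureVectorState now takes the key so that keyed ICs going megamorphic can record whether the miss was on a named property or an element. The decision, reduced to a standalone predicate (hypothetical types):

    #include <cassert>

    enum IcCheckType { ELEMENT, PROPERTY };

    struct Key {
      bool is_name;  // stands in for key->IsName()
    };

    // Mirrors the new branch: names mark the megamorphic keyed IC as
    // PROPERTY, everything else (indices) as ELEMENT.
    IcCheckType MegamorphicKeyType(const Key& key) {
      return key.is_name ? PROPERTY : ELEMENT;
    }

    int main() {
      assert(MegamorphicKeyType(Key{true}) == PROPERTY);
      assert(MegamorphicKeyType(Key{false}) == ELEMENT);
      return 0;
    }
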
@@ -662,14 +670,14 @@
     // Rewrite to the generic keyed load stub.
     if (FLAG_use_ic) {
       DCHECK(UseVector());
-      ConfigureVectorState(MEGAMORPHIC);
+      ConfigureVectorState(MEGAMORPHIC, name);
       TRACE_IC("LoadIC", name);
       TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
     }
     Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(), result,
-        Object::GetElement(isolate(), object, index, language_mode()), Object);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                               Object::GetElement(isolate(), object, index),
+                               Object);
     return result;
   }
 
@@ -685,9 +693,9 @@
     ScriptContextTable::LookupResult lookup_result;
     if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
       Handle<Object> result =
-          FixedArray::get(ScriptContextTable::GetContext(
+          FixedArray::get(*ScriptContextTable::GetContext(
                               script_contexts, lookup_result.context_index),
-                          lookup_result.slot_index);
+                          lookup_result.slot_index, isolate());
       if (*result == *isolate()->factory()->the_hole_value()) {
         // Do not install stubs and stay pre-monomorphic for
         // uninitialized accesses.
@@ -713,8 +721,8 @@
     // Get the property.
     Handle<Object> result;
 
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(), result, Object::GetProperty(&it, language_mode()), Object);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
+                               Object);
     if (it.IsFound()) {
       return result;
     } else if (!ShouldThrowReferenceError(object)) {
@@ -853,7 +861,7 @@
         CopyICToMegamorphicCache(name);
       }
       if (UseVector()) {
-        ConfigureVectorState(MEGAMORPHIC);
+        ConfigureVectorState(MEGAMORPHIC, name);
       } else {
         set_target(*megamorphic_stub());
       }
@@ -899,9 +907,7 @@
   if (initialization_state != MEGAMORPHIC) {
     return KeyedLoadICStub(isolate, LoadICState(extra_state)).GetCode();
   }
-  return is_strong(LoadICState::GetLanguageMode(extra_state))
-             ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
-             : isolate->builtins()->KeyedLoadIC_Megamorphic();
+  return isolate->builtins()->KeyedLoadIC_Megamorphic();
 }
 
 
@@ -976,29 +982,34 @@
   DCHECK(lookup->state() == LookupIterator::ACCESSOR);
   Isolate* isolate = lookup->isolate();
   Handle<Object> accessors = lookup->GetAccessors();
-  if (accessors->IsExecutableAccessorInfo()) {
-    Handle<ExecutableAccessorInfo> info =
-        Handle<ExecutableAccessorInfo>::cast(accessors);
+  if (accessors->IsAccessorInfo()) {
+    Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
     if (info->getter() != NULL &&
-        !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate, info,
-                                                         receiver_map)) {
+        !AccessorInfo::IsCompatibleReceiverMap(isolate, info, receiver_map)) {
       return false;
     }
   } else if (accessors->IsAccessorPair()) {
     Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
                           isolate);
+    if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo())
+      return false;
     Handle<JSObject> holder = lookup->GetHolder<JSObject>();
     Handle<Object> receiver = lookup->GetReceiver();
-    if (getter->IsJSFunction() && holder->HasFastProperties()) {
-      Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
-      if (receiver->IsJSObject() || function->shared()->IsBuiltin() ||
-          !is_sloppy(function->shared()->language_mode())) {
-        CallOptimization call_optimization(function);
-        if (call_optimization.is_simple_api_call() &&
-            !call_optimization.IsCompatibleReceiverMap(receiver_map, holder)) {
+    if (holder->HasFastProperties()) {
+      if (getter->IsJSFunction()) {
+        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+        if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
+            is_sloppy(function->shared()->language_mode())) {
+          // Calling sloppy non-builtins with a value as the receiver
+          // requires boxing.
           return false;
         }
       }
+      CallOptimization call_optimization(getter);
+      if (call_optimization.is_simple_api_call() &&
+          !call_optimization.IsCompatibleReceiverMap(receiver_map, holder)) {
+        return false;
+      }
     }
   }
   return true;
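
The reshuffled IsCompatibleReceiver logic above adds an early bail-out for accessor-pair getters that are neither JSFunctions nor function templates, and preserves the boxing rule for primitives. Condensed into a predicate (the flags are stand-ins for the corresponding V8 queries):

    #include <cassert>

    struct Getter {
      bool is_js_function;
      bool is_function_template;
      bool is_builtin;
      bool is_sloppy;
    };

    // Primitive receivers only work with builtins or non-sloppy getters,
    // because a sloppy non-builtin would need the receiver boxed.
    bool GetterCompatible(const Getter& g, bool receiver_is_js_object) {
      if (!g.is_js_function && !g.is_function_template) return false;
      if (g.is_js_function && !receiver_is_js_object && !g.is_builtin &&
          g.is_sloppy) {
        return false;
      }
      return true;  // the API-call receiver-map check is elided here
    }

    int main() {
      Getter sloppy{true, false, false, true};
      assert(!GetterCompatible(sloppy, /*receiver_is_js_object=*/false));
      assert(GetterCompatible(sloppy, /*receiver_is_js_object=*/true));
      return 0;
    }
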
@@ -1009,7 +1020,7 @@
   if (state() == UNINITIALIZED) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
-    ConfigureVectorState(PREMONOMORPHIC);
+    ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
     TRACE_IC("LoadIC", lookup->name());
     return;
   }
@@ -1019,7 +1030,7 @@
       lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
   } else if (!lookup->IsFound()) {
-    if (kind() == Code::LOAD_IC && !is_strong(language_mode())) {
+    if (kind() == Code::LOAD_IC) {
       code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
                                                               receiver_map());
       // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -1168,50 +1179,39 @@
         return stub.GetCode();
       }
 
-      Handle<Object> accessors = lookup->GetAccessors();
-      if (accessors->IsExecutableAccessorInfo()) {
-        Handle<ExecutableAccessorInfo> info =
-            Handle<ExecutableAccessorInfo>::cast(accessors);
-        if (v8::ToCData<Address>(info->getter()) == 0) break;
-        if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
-                                                             map)) {
-          // This case should be already handled in LoadIC::UpdateCaches.
-          UNREACHABLE();
-          break;
-        }
-        if (!holder->HasFastProperties()) break;
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        return compiler.CompileLoadCallback(lookup->name(), info);
-      }
-      if (accessors->IsAccessorPair()) {
-        Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
-                              isolate());
-        if (!getter->IsJSFunction()) break;
-        if (!holder->HasFastProperties()) break;
-        // When debugging we need to go the slow path to flood the accessor.
-        if (GetSharedFunctionInfo()->HasDebugInfo()) break;
-        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
-        if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
-            is_sloppy(function->shared()->language_mode())) {
-          // Calling sloppy non-builtins with a value as the receiver
-          // requires boxing.
-          break;
-        }
-        CallOptimization call_optimization(function);
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        if (call_optimization.is_simple_api_call()) {
-          if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+      if (IsCompatibleReceiver(lookup, map)) {
+        Handle<Object> accessors = lookup->GetAccessors();
+        if (accessors->IsAccessorPair()) {
+          if (!holder->HasFastProperties()) break;
+          // When debugging we need to go the slow path to flood the accessor.
+          if (GetSharedFunctionInfo()->HasDebugInfo()) break;
+          Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+                                isolate());
+          CallOptimization call_optimization(getter);
+          NamedLoadHandlerCompiler compiler(isolate(), map, holder,
+                                            cache_holder);
+          if (call_optimization.is_simple_api_call()) {
             return compiler.CompileLoadCallback(
                 lookup->name(), call_optimization, lookup->GetAccessorIndex());
-          } else {
+          }
+          int expected_arguments = Handle<JSFunction>::cast(getter)
+                                       ->shared()
+                                       ->internal_formal_parameter_count();
+          return compiler.CompileLoadViaGetter(
+              lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+        } else if (accessors->IsAccessorInfo()) {
+          Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+          if (v8::ToCData<Address>(info->getter()) == 0) break;
+          if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map)) {
             // This case should be already handled in LoadIC::UpdateCaches.
             UNREACHABLE();
+            break;
           }
+          if (!holder->HasFastProperties()) break;
+          NamedLoadHandlerCompiler compiler(isolate(), map, holder,
+                                            cache_holder);
+          return compiler.CompileLoadCallback(lookup->name(), info);
         }
-        int expected_arguments =
-            function->shared()->internal_formal_parameter_count();
-        return compiler.CompileLoadViaGetter(
-            lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
       }
       break;
     }
@@ -1237,9 +1237,7 @@
         // property must be found in the object for the stub to be
         // applicable.
         if (!receiver_is_holder) break;
-        return is_strong(language_mode())
-                   ? isolate()->builtins()->LoadIC_Normal_Strong()
-                   : isolate()->builtins()->LoadIC_Normal();
+        return isolate()->builtins()->LoadIC_Normal();
       }
 
       // -------------- Fields --------------
@@ -1349,8 +1347,7 @@
 
   CodeHandleList handlers(target_receiver_maps.length());
   ElementHandlerCompiler compiler(isolate());
-  compiler.CompileElementHandlers(&target_receiver_maps, &handlers,
-                                  language_mode());
+  compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
   ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps, &handlers);
   return null_handle;
 }
@@ -1361,8 +1358,7 @@
   if (MigrateDeprecated(object)) {
     Handle<Object> result;
     ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(), result,
-        Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
+        isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
         Object);
     return result;
   }
@@ -1378,7 +1374,8 @@
     ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
                                LoadIC::Load(object, Handle<Name>::cast(key)),
                                Object);
-  } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+  } else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+             !object->IsJSValue()) {
     if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
       Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
       if (object->IsString() || key->IsSmi()) stub = LoadElementStub(receiver);
@@ -1389,7 +1386,7 @@
   if (!is_vector_set() || stub.is_null()) {
     Code* generic = *megamorphic_stub();
     if (!stub.is_null() && *stub == generic) {
-      ConfigureVectorState(MEGAMORPHIC);
+      ConfigureVectorState(MEGAMORPHIC, key);
       TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
     }
 
@@ -1399,10 +1396,9 @@
   if (!load_handle.is_null()) return load_handle;
 
   Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate(), result,
-      Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
-      Object);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                             Runtime::GetObjectProperty(isolate(), object, key),
+                             Object);
   return result;
 }
 
@@ -1410,9 +1406,10 @@
 bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
                              JSReceiver::StoreFromKeyed store_mode) {
   // Disable ICs for non-JSObjects for now.
-  Handle<Object> receiver = it->GetReceiver();
-  if (!receiver->IsJSObject()) return false;
-  DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
+  Handle<Object> object = it->GetReceiver();
+  if (!object->IsJSObject()) return false;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+  DCHECK(!receiver->map()->is_deprecated());
 
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -1451,21 +1448,24 @@
         }
 
         // Receiver != holder.
-        PrototypeIterator iter(it->isolate(), receiver);
         if (receiver->IsJSGlobalProxy()) {
+          PrototypeIterator iter(it->isolate(), receiver);
           return it->GetHolder<Object>().is_identical_to(
               PrototypeIterator::GetCurrent(iter));
         }
 
         if (it->HolderIsReceiverOrHiddenPrototype()) return false;
 
-        it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+        if (it->ExtendingNonExtensible(receiver)) return false;
+        it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
         return it->IsCacheableTransition();
       }
     }
   }
 
-  it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+  receiver = it->GetStoreTarget();
+  if (it->ExtendingNonExtensible(receiver)) return false;
+  it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
   return it->IsCacheableTransition();
 }
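
LookupForWrite now refuses to cache a transition that would add a property to a non-extensible receiver; ExtendingNonExtensible is checked before PrepareTransitionToDataProperty on both exit paths. In outline (names illustrative):

    #include <cassert>

    struct Receiver {
      bool extensible;
    };

    // The new guard: adding a property (a map transition) to a sealed or
    // frozen object cannot be cached by the IC, so fall back to runtime.
    bool CanCacheTransition(const Receiver& r, bool property_exists) {
      if (!property_exists && !r.extensible) return false;
      return true;  // PrepareTransitionToDataProperty would follow
    }

    int main() {
      assert(!CanCacheTransition(Receiver{false}, false));
      assert(CanCacheTransition(Receiver{true}, false));
      return 0;
    }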
 
@@ -1479,7 +1479,7 @@
     // Rewrite to the generic keyed store stub.
     if (FLAG_use_ic) {
       if (UseVector()) {
-        ConfigureVectorState(MEGAMORPHIC);
+        ConfigureVectorState(MEGAMORPHIC, name);
       } else if (!AddressIsDeoptimizedCode()) {
         set_target(*megamorphic_stub());
       }
@@ -1510,7 +1510,7 @@
       }
 
       Handle<Object> previous_value =
-          FixedArray::get(script_context, lookup_result.slot_index);
+          FixedArray::get(*script_context, lookup_result.slot_index, isolate());
 
       if (*previous_value == *isolate()->factory()->the_hole_value()) {
         // Do not install stubs and stay pre-monomorphic for
@@ -1564,18 +1564,18 @@
   return value;
 }
 
-
 Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
-                                     ConvertReceiverMode mode) {
-  CallICTrampolineStub stub(isolate, CallICState(argc, mode));
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
+  CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
   Handle<Code> code = stub.GetCode();
   return code;
 }
 
-
 Handle<Code> CallIC::initialize_stub_in_optimized_code(
-    Isolate* isolate, int argc, ConvertReceiverMode mode) {
-  CallICStub stub(isolate, CallICState(argc, mode));
+    Isolate* isolate, int argc, ConvertReceiverMode mode,
+    TailCallMode tail_call_mode) {
+  CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
   Handle<Code> code = stub.GetCode();
   return code;
 }
@@ -1653,7 +1653,7 @@
   if (state() == UNINITIALIZED) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
-    ConfigureVectorState(PREMONOMORPHIC);
+    ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
     TRACE_IC("StoreIC", lookup->name());
     return;
   }
@@ -1693,8 +1693,7 @@
   // This is currently guaranteed by checks in StoreIC::Store.
   Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
   Handle<JSObject> holder = lookup->GetHolder<JSObject>();
-  DCHECK(!receiver->IsAccessCheckNeeded() ||
-         isolate()->IsInternallyUsedPropertyName(lookup->name()));
+  DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
 
   switch (lookup->state()) {
     case LookupIterator::TRANSITION: {
@@ -1733,9 +1732,8 @@
         break;
       }
       Handle<Object> accessors = lookup->GetAccessors();
-      if (accessors->IsExecutableAccessorInfo()) {
-        Handle<ExecutableAccessorInfo> info =
-            Handle<ExecutableAccessorInfo>::cast(accessors);
+      if (accessors->IsAccessorInfo()) {
+        Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
         if (v8::ToCData<Address>(info->setter()) == 0) {
           TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
           break;
@@ -1746,13 +1744,14 @@
                            "special data property in prototype chain");
           break;
         }
-        if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
-                                                             receiver_map())) {
+        if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+                                                   receiver_map())) {
           TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
           break;
         }
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-        return compiler.CompileStoreCallback(receiver, lookup->name(), info);
+        return compiler.CompileStoreCallback(receiver, lookup->name(), info,
+                                             language_mode());
       } else if (accessors->IsAccessorPair()) {
         Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
                               isolate());
@@ -1800,9 +1799,8 @@
         bool use_stub = true;
         if (lookup->representation().IsHeapObject()) {
           // Only use a generic stub if no types need to be tracked.
-          Handle<HeapType> field_type = lookup->GetFieldType();
-          HeapType::Iterator<Map> it = field_type->Classes();
-          use_stub = it.Done();
+          Handle<FieldType> field_type = lookup->GetFieldType();
+          use_stub = !field_type->IsClass();
         }
         if (use_stub) {
           StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
@@ -2082,7 +2080,7 @@
                        JSReceiver::MAY_BE_STORE_FROM_KEYED),
         Object);
     if (!is_vector_set()) {
-      ConfigureVectorState(MEGAMORPHIC);
+      ConfigureVectorState(MEGAMORPHIC, key);
       TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
                        "unhandled internalized string key");
       TRACE_IC("StoreIC", key);
@@ -2157,7 +2155,7 @@
   if (!is_vector_set() || stub.is_null()) {
     Code* megamorphic = *megamorphic_stub();
     if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
-      ConfigureVectorState(MEGAMORPHIC);
+      ConfigureVectorState(MEGAMORPHIC, key);
       TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
                        *stub == megamorphic ? "set generic" : "slow stub");
     }
@@ -2220,6 +2218,7 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   Handle<Object> function = args.at<Object>(0);
@@ -2236,6 +2235,7 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
@@ -2268,6 +2268,7 @@
 // Used from ic-<arch>.cc
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
@@ -2287,6 +2288,7 @@
 
 RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
@@ -2308,6 +2310,7 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
@@ -2339,6 +2342,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
@@ -2391,6 +2395,7 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
@@ -2412,6 +2417,7 @@
 
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
@@ -2469,6 +2475,7 @@
 
 RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   // Length == 5 or 6, depending on whether the vector slot
   // is passed in a virtual register or not.
@@ -2504,60 +2511,52 @@
     default:
       UNREACHABLE();
     case Token::ADD:
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::Add(isolate(), left, right, state.strength()), Object);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                                 Object::Add(isolate(), left, right), Object);
       break;
     case Token::SUB:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::Subtract(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::Subtract(isolate(), left, right), Object);
       break;
     case Token::MUL:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::Multiply(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::Multiply(isolate(), left, right), Object);
       break;
     case Token::DIV:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::Divide(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::Divide(isolate(), left, right), Object);
       break;
     case Token::MOD:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::Modulus(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::Modulus(isolate(), left, right), Object);
       break;
     case Token::BIT_OR:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::BitwiseOr(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::BitwiseOr(isolate(), left, right), Object);
       break;
     case Token::BIT_AND:
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::BitwiseAnd(isolate(), left, right, state.strength()), Object);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                                 Object::BitwiseAnd(isolate(), left, right),
+                                 Object);
       break;
     case Token::BIT_XOR:
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::BitwiseXor(isolate(), left, right, state.strength()), Object);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                                 Object::BitwiseXor(isolate(), left, right),
+                                 Object);
       break;
     case Token::SAR:
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::ShiftRight(isolate(), left, right, state.strength()), Object);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                                 Object::ShiftRight(isolate(), left, right),
+                                 Object);
       break;
     case Token::SHR:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::ShiftRightLogical(isolate(), left, right, state.strength()),
+          isolate(), result, Object::ShiftRightLogical(isolate(), left, right),
           Object);
       break;
     case Token::SHL:
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate(), result,
-          Object::ShiftLeft(isolate(), left, right, state.strength()), Object);
+          isolate(), result, Object::ShiftLeft(isolate(), left, right), Object);
       break;
   }
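
With Strength removed, the BinaryOpIC miss handler forwards each token straight to the corresponding Object:: operation with no extra mode argument. The shape of that dispatch, over plain doubles for illustration:

    #include <cassert>
    #include <cmath>

    enum Token { ADD, SUB, MUL, DIV, MOD };

    // Token-to-operation dispatch with no strength/mode parameter, the
    // same shape as the rewritten switch (numeric cases only).
    double BinaryOp(Token op, double left, double right) {
      switch (op) {
        case ADD: return left + right;
        case SUB: return left - right;
        case MUL: return left * right;
        case DIV: return left / right;
        case MOD: return std::fmod(left, right);
      }
      return 0;  // unreachable for valid tokens
    }

    int main() {
      assert(BinaryOp(ADD, 2, 3) == 5);
      assert(BinaryOp(MOD, 7, 4) == 3);
      return 0;
    }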
 
@@ -2624,6 +2623,7 @@
 
 RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
@@ -2639,6 +2639,7 @@
 
 RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
   Handle<AllocationSite> allocation_site =
@@ -2653,10 +2654,8 @@
   return *result;
 }
 
-
-Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op,
-                                     Strength strength) {
-  CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
+  CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
                      CompareICState::UNINITIALIZED,
                      CompareICState::UNINITIALIZED);
   Code* code = NULL;
@@ -2664,10 +2663,8 @@
   return code;
 }
 
-
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op,
-                                         Strength strength) {
-  CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+  CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
                      CompareICState::UNINITIALIZED,
                      CompareICState::UNINITIALIZED);
   return stub.GetCode();
@@ -2684,8 +2681,7 @@
   CompareICState::State state = CompareICState::TargetState(
       old_stub.state(), old_stub.left(), old_stub.right(), op_,
       HasInlinedSmiCode(address()), x, y);
-  CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right,
-                     state);
+  CompareICStub stub(isolate(), op_, new_left, new_right, state);
   if (state == CompareICState::KNOWN_RECEIVER) {
     stub.set_known_map(
         Handle<Map>(Handle<JSReceiver>::cast(x)->map(), isolate()));
@@ -2718,6 +2714,7 @@
 // Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2740,15 +2737,6 @@
 }
 
 
-Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
-                                              Handle<Object> object) {
-  if (object->IsNull() || object->IsUndefined()) {
-    return isolate->factory()->true_value();
-  }
-  return isolate->factory()->ToBoolean(object->IsUndetectableObject());
-}
-
-
 Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
   ExtraICState extra_ic_state = target()->extra_ic_state();
 
@@ -2760,8 +2748,6 @@
 
   stub.UpdateStatus(object);
 
-  NilValue nil = stub.nil_value();
-
   // Find or create the specialized stub to support the new set of types.
   Handle<Code> code;
   if (stub.IsMonomorphic()) {
@@ -2773,12 +2759,13 @@
     code = stub.GetCode();
   }
   set_target(*code);
-  return DoCompareNilSlow(isolate(), nil, object);
+  return isolate()->factory()->ToBoolean(object->IsUndetectableObject());
 }
 
 
 RUNTIME_FUNCTION(Runtime_CompareNilIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> object = args.at<Object>(0);
   CompareNilIC ic(isolate);
@@ -2804,6 +2791,7 @@
 
 RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   DCHECK(args.length() == 1);
   HandleScope scope(isolate);
   Handle<Object> object = args.at<Object>(0);
@@ -2818,13 +2806,13 @@
   Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
   Handle<Name> name = args.at<Name>(3);
   Handle<Object> value = args.at<Object>(4);
+  CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
   HandleScope scope(isolate);
 
-  Handle<ExecutableAccessorInfo> callback(
+  Handle<AccessorInfo> callback(
       callback_or_cell->IsWeakCell()
-          ? ExecutableAccessorInfo::cast(
-                WeakCell::cast(*callback_or_cell)->value())
-          : ExecutableAccessorInfo::cast(*callback_or_cell));
+          ? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
+          : AccessorInfo::cast(*callback_or_cell));
 
   DCHECK(callback->IsCompatibleReceiver(*receiver));
 
@@ -2834,8 +2822,10 @@
   DCHECK(fun != NULL);
 
   LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
+  Object::ShouldThrow should_throw =
+      is_sloppy(language_mode) ? Object::DONT_THROW : Object::THROW_ON_ERROR;
   PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
-                                        *holder);
+                                        *holder, should_throw);
   custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
   RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
   return *value;
@@ -2914,9 +2904,10 @@
   Handle<Object> value = args.at<Object>(2);
 #ifdef DEBUG
   PrototypeIterator iter(isolate, receiver,
-                         PrototypeIterator::START_AT_RECEIVER);
+                         PrototypeIterator::START_AT_RECEIVER,
+                         PrototypeIterator::END_AT_NON_HIDDEN);
   bool found = false;
-  for (; !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+  for (; !iter.IsAtEnd(); iter.Advance()) {
     Handle<Object> current = PrototypeIterator::GetCurrent(iter);
     if (current->IsJSObject() &&
         Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
@@ -2941,17 +2932,15 @@
   DCHECK(args.smi_at(1) >= 0);
   uint32_t index = args.smi_at(1);
   Handle<Object> result;
-  // TODO(conradw): Investigate strong mode semantics for this.
-  LanguageMode language_mode = SLOPPY;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Object::GetElement(isolate, receiver, index, language_mode));
+      isolate, result, Object::GetElement(isolate, receiver, index));
   return *result;
 }
 
 
 RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
   HandleScope scope(isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
diff --git a/src/ic/ic.h b/src/ic/ic.h
index a3265d7..0a324a8 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -112,7 +112,7 @@
   }
 
   // Configure for most states.
-  void ConfigureVectorState(IC::State new_state);
+  void ConfigureVectorState(IC::State new_state, Handle<Object> key);
   // Configure the vector for MONOMORPHIC.
   void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
                             Handle<Code> handler);
@@ -284,9 +284,11 @@
 
   // Code generator routines.
   static Handle<Code> initialize_stub(Isolate* isolate, int argc,
-                                      ConvertReceiverMode mode);
+                                      ConvertReceiverMode mode,
+                                      TailCallMode tail_call_mode);
   static Handle<Code> initialize_stub_in_optimized_code(
-      Isolate* isolate, int argc, ConvertReceiverMode mode);
+      Isolate* isolate, int argc, ConvertReceiverMode mode,
+      TailCallMode tail_call_mode);
 
   static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
 };
@@ -294,19 +296,10 @@
 
 class LoadIC : public IC {
  public:
-  static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
-                                          LanguageMode language_mode) {
-    return LoadICState(typeof_mode, language_mode).GetExtraICState();
-  }
-
   TypeofMode typeof_mode() const {
     return LoadICState::GetTypeofMode(extra_ic_state());
   }
 
-  LanguageMode language_mode() const {
-    return LoadICState::GetLanguageMode(extra_ic_state());
-  }
-
   LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
       : IC(depth, isolate, nexus) {
     DCHECK(nexus != NULL);
@@ -321,9 +314,8 @@
 
   static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                         LanguageMode language_mode);
-  static void GenerateNormal(MacroAssembler* masm, LanguageMode language_mode);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
 
   static Handle<Code> initialize_stub(Isolate* isolate,
                                       ExtraICState extra_state);
@@ -340,14 +332,10 @@
 
   Handle<Code> slow_stub() const {
     if (kind() == Code::LOAD_IC) {
-      return is_strong(language_mode())
-                 ? isolate()->builtins()->LoadIC_Slow_Strong()
-                 : isolate()->builtins()->LoadIC_Slow();
+      return isolate()->builtins()->LoadIC_Slow();
     } else {
       DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
-      return is_strong(language_mode())
-                 ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
-                 : isolate()->builtins()->KeyedLoadIC_Slow();
+      return isolate()->builtins()->KeyedLoadIC_Slow();
     }
   }
 
@@ -372,21 +360,6 @@
 
 class KeyedLoadIC : public LoadIC {
  public:
-  // ExtraICState bits (building on IC)
-  class IcCheckTypeField
-      : public BitField<IcCheckType, LoadICState::kNextBitFieldOffset, 1> {};
-
-  static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
-                                          LanguageMode language_mode,
-                                          IcCheckType key_type) {
-    return LoadICState(typeof_mode, language_mode).GetExtraICState() |
-           IcCheckTypeField::encode(key_type);
-  }
-
-  static IcCheckType GetKeyType(ExtraICState extra_state) {
-    return IcCheckTypeField::decode(extra_state);
-  }
-
   KeyedLoadIC(FrameDepth depth, Isolate* isolate,
               KeyedLoadICNexus* nexus = NULL)
       : LoadIC(depth, isolate, nexus) {
@@ -399,11 +372,9 @@
 
   // Code generator routines.
   static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                         LanguageMode language_mode);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
   static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GenerateMegamorphic(MacroAssembler* masm,
-                                  LanguageMode language_mode);
+  static void GenerateMegamorphic(MacroAssembler* masm);
 
   // Bit mask to be tested against the bit field for the cases in which
   // the generic stub should go into the slow case.
@@ -616,8 +587,7 @@
   static Condition ComputeCondition(Token::Value op);
 
   // Factory method for getting an uninitialized compare stub.
-  static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op,
-                                       Strength strength);
+  static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
 
  private:
   static bool HasInlinedSmiCode(Address address);
@@ -625,8 +595,7 @@
   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return ComputeCondition(op_); }
 
-  static Code* GetRawUninitialized(Isolate* isolate, Token::Value op,
-                                   Strength strength);
+  static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
 
   static void Clear(Isolate* isolate, Address address, Code* target,
                     Address constant_pool);
@@ -646,9 +615,6 @@
   static Handle<Code> GetUninitialized();
 
   static void Clear(Address address, Code* target, Address constant_pool);
-
-  static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
-                                         Handle<Object> object);
 };
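
The deletions in this header drop LanguageMode (and Strength) from the
ExtraICState encoding; the removed IcCheckTypeField shows the BitField
pattern that packs such flags into the state word. A minimal model of
that pattern, with arbitrary offsets and names suffixed to mark them as
sketches:

    #include <cstdint>

    template <typename T, int kShift, int kBits>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t state) {
        return static_cast<T>((state & kMask) >> kShift);
      }
    };

    enum IcCheckType { ELEMENT, PROPERTY };
    using IcCheckTypeFieldSketch = BitFieldSketch<IcCheckType, 2, 1>;
    static_assert(IcCheckTypeFieldSketch::decode(
                      IcCheckTypeFieldSketch::encode(PROPERTY)) == PROPERTY,
                  "encode/decode round-trips");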
 
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index 554d0c5..f3af1cf 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -279,9 +281,16 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ lw(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ lw(data,
+            FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ lw(data,
+            FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ lw(data,
+            FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
   }
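
The branch added above distinguishes how the CallHandlerInfo is reached:
for a constant call the callee is a JSFunction and the info hangs off its
SharedFunctionInfo, otherwise the callee already is the
FunctionTemplateInfo. The same pointer chase in plain C++, with stand-in
types rather than V8's object layout:

    struct CallHandlerInfo { void* data; };
    struct FunctionTemplateInfo { CallHandlerInfo* call_code; };
    struct SharedFunctionInfo { FunctionTemplateInfo* function_data; };
    struct JSFunction { SharedFunctionInfo* shared; };

    void* LoadCallData(void* callee, bool is_constant_call) {
      FunctionTemplateInfo* info =
          is_constant_call
              ? static_cast<JSFunction*>(callee)->shared->function_data
              : static_cast<FunctionTemplateInfo*>(callee);
      return info->call_code->data;  // the final kDataOffset load
    }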
 
@@ -299,7 +308,8 @@
   __ li(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -383,8 +393,7 @@
   __ Branch(miss_label, ne, value_reg, Operand(scratch));
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -392,21 +401,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     __ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    Label do_store;
-    while (true) {
-      // Compare map directly within the Branch() functions.
-      __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
-      it.Advance();
-      if (it.Done()) {
-        __ Branch(miss_label, ne, map_reg, Operand(scratch));
-        break;
-      }
-      __ Branch(&do_store, eq, map_reg, Operand(scratch));
-    }
-    __ bind(&do_store);
+    // Compare map directly within the Branch() functions.
+    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
+    __ Branch(miss_label, ne, map_reg, Operand(scratch));
   }
 }
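
The rewrite above works because a FieldType, unlike the old HeapType, can
name at most one class, so the map-matching loop collapses to a single
weak-cell compare. The control flow, modeled with placeholder types:

    struct Map {};
    struct FieldTypeSketch {
      Map* class_map;  // null when the field type is not a class ("Any")
      bool IsClass() const { return class_map != nullptr; }
      Map* AsClass() const { return class_map; }
    };

    bool FieldTypeCheckPasses(const FieldTypeSketch* type, Map* value_map) {
      if (!type->IsClass()) return true;    // nothing to check
      return value_map == type->AsClass();  // otherwise: branch to miss
    }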
 
@@ -584,42 +583,51 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Build AccessorInfo::args_ list on the stack and push property name below
-  // the exit frame to make GC aware of them and store pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  DCHECK(!scratch2().is(reg));
-  DCHECK(!scratch3().is(reg));
-  DCHECK(!scratch4().is(reg));
-  __ push(receiver());
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  // Here and below, the +1 accounts for name(), pushed after the args_ array.
+  typedef PropertyCallbackArguments PCA;
+  __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+  __ sw(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
-    __ li(scratch3(), data);
+    __ li(scratch2(), data);
   } else {
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
     // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch3(), cell);
+    __ GetWeakValue(scratch2(), cell);
   }
-  __ Subu(sp, sp, 6 * kPointerSize);
-  __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
-  __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
-  __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
-  __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
-  __ sw(reg, MemOperand(sp, 1 * kPointerSize));
-  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
-  __ Addu(scratch2(), sp, 1 * kPointerSize);
+  __ sw(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+  __ sw(scratch2(),
+        MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+  __ sw(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+                                       kPointerSize));
+  __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  __ sw(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+  __ sw(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+  // should_throw_on_error -> false
+  DCHECK(Smi::FromInt(0) == nullptr);
+  __ sw(zero_reg,
+        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
 
-  __ mov(a2, scratch2());  // Saved in case scratch2 == a1.
+  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
+
   // Abi for CallApiGetter.
   Register getter_address_reg = ApiGetterDescriptor::function_address();
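
The indexed stores above lay out one stack slot per
PropertyCallbackArguments entry plus one for name() at the bottom of the
frame. The offsets can be recomputed directly from the STATIC_ASSERTed
indices; a runnable check, with kPointerSize hard-coded for 32-bit MIPS:

    #include <cstdio>

    enum PCASketch {
      kShouldThrowOnErrorIndex = 0,
      kHolderIndex = 1,
      kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3,
      kReturnValueOffset = 4,
      kDataIndex = 5,
      kThisIndex = 6,
      kArgsLength = 7
    };

    int main() {
      const int kPointerSize = 4;
      // sp + 0 holds name(); each args_ entry sits one slot higher.
      std::printf("frame: %d bytes\n", (kArgsLength + 1) * kPointerSize);
      std::printf("this:   sp + %d\n", (kThisIndex + 1) * kPointerSize);
      std::printf("holder: sp + %d\n", (kHolderIndex + 1) * kPointerSize);
      return 0;
    }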
 
@@ -705,8 +713,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ Push(receiver(), holder_reg);  // Receiver.
@@ -721,6 +729,7 @@
   __ push(at);
   __ li(at, Operand(name));
   __ Push(at, value());
+  __ Push(Smi::FromInt(language_mode));
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
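
Pushing Smi::FromInt(language_mode) here works, and storing zero_reg for
Smi::FromInt(0) in the previous hunk is legal, because of V8's pointer
tagging: on 32-bit targets a Smi is the integer shifted left past a zero
tag bit, so Smi 0 is the all-zero word (hence the DCHECK comparing it to
nullptr). A sketch of the encoding:

    #include <cassert>
    #include <cstdint>

    // Assumes the 32-bit layout: kSmiTagSize == 1, kSmiTag == 0.
    constexpr std::intptr_t SmiFromIntSketch(int value) {
      return static_cast<std::intptr_t>(value) << 1;
    }

    int main() {
      assert(SmiFromIntSketch(0) == 0);  // bit pattern of a null pointer
      assert(SmiFromIntSketch(2) == 4);  // e.g. a LanguageMode enum value
      return 0;
    }
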
@@ -769,7 +778,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, a1, a3);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index a27d6b5..ae3615e 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -159,8 +159,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* slow,
-                                  LanguageMode language_mode) {
+                                  Register result, Label* slow) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -216,13 +215,8 @@
   __ Branch(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ Branch(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ Branch(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
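
With strong mode gone, an absent element now unconditionally resolves to
undefined instead of optionally branching to a throwing slow path. The
post-change contract, restated with stand-in types:

    #include <optional>

    // nullopt plays the role of undefined here; no slow-path branch.
    std::optional<int> FastArrayLoadSketch(const int* elems, int len, int i) {
      if (i < 0 || i >= len) return std::nullopt;  // the &absent case
      return elems[i];
    }
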
@@ -230,8 +224,7 @@
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // The key is a smi.
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(at, at, scratch1);
+  __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
   __ lw(scratch2, MemOperand(at));
 
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -271,8 +264,7 @@
   __ bind(&unique);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = a0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -287,7 +279,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -311,7 +303,7 @@
 
   DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);
 
   LoadIC_PushArgs(masm);
 
@@ -319,17 +311,14 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in ra.
 
   __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
   __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
@@ -339,7 +328,7 @@
 
   DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);
 
   LoadIC_PushArgs(masm);
 
@@ -347,21 +336,16 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in ra.
 
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in ra.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -385,9 +369,9 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(a0, a3, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow,
-                        language_mode);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
+  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
+                      a3);
   __ Ret();
 
   __ bind(&check_number_dictionary);
@@ -405,9 +389,9 @@
 
   // Slow case, key and receiver still in a2 and a1.
   __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
                       a3);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -451,8 +435,8 @@
   GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
   // Load the property to v0.
   GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
-                      a3);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+                      t0, a3);
   __ Ret();
 
   __ bind(&index_name);
@@ -491,8 +475,7 @@
   // there may be a callback on the element.
   Label holecheck_passed1;
   __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(address, address, at);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ lw(scratch, MemOperand(address));
   __ Branch(&holecheck_passed1, ne, scratch,
             Operand(masm->isolate()->factory()->the_hole_value()));
@@ -511,8 +494,7 @@
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ sw(value, MemOperand(address));
   __ Ret();
 
@@ -528,8 +510,7 @@
     __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch);
+  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
   __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
   __ mov(scratch, value);  // Preserve the value which is returned.
@@ -550,8 +531,7 @@
   // go to the runtime.
   __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                      kHoleNanUpper32Offset - kHeapObjectTag));
-  __ sll(at, key, kPointerSizeLog2);
-  __ addu(address, address, at);
+  __ Lsa(address, address, key, kPointerSizeLog2);
   __ lw(scratch, MemOperand(address));
   __ Branch(&fast_double_without_map_check, ne, scratch,
             Operand(kHoleNanUpper32));
@@ -791,11 +771,11 @@
 
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
index 1a9897e..039763c 100644
--- a/src/ic/mips/stub-cache-mips.cc
+++ b/src/ic/mips/stub-cache-mips.cc
@@ -42,13 +42,11 @@
   scratch = no_reg;
 
   // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ sll(offset_scratch, offset, 1);
-  __ Addu(offset_scratch, offset_scratch, offset);
+  __ Lsa(offset_scratch, offset, offset, 1);
 
   // Calculate the base address of the entry.
   __ li(base_addr, Operand(key_offset));
-  __ sll(at, offset_scratch, kPointerSizeLog2);
-  __ Addu(base_addr, base_addr, at);
+  __ Lsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
 
   // Check that the key in the entry matches the name.
   __ lw(at, MemOperand(base_addr, 0));
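
The sll+addu pairs replaced throughout these MIPS files compute
"base + (index << shift)"; Lsa (and Dlsa on MIPS64) fuses that into one
instruction. What the macro computes, checked in plain C++:

    #include <cassert>
    #include <cstdint>

    // Lsa(rd, rt, rs, sa) sets rd = rt + (rs << sa).
    uint32_t LsaSketch(uint32_t rt, uint32_t rs, uint32_t sa) {
      return rt + (rs << sa);
    }

    int main() {
      uint32_t offset = 7;
      // "Multiply by 3 because there are 3 fields per entry":
      assert(LsaSketch(offset, offset, 1) == 3 * offset);
      // Scaling a Smi key (value 4, encoded as 8) into a byte offset with
      // shift kPointerSizeLog2 - kSmiTagSize == 2 - 1 on 32-bit:
      assert(LsaSketch(100, 8, 1) == 100 + 4 * 4);
      return 0;
    }
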
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index d94a292..968effd 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_MIPS64
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -279,9 +281,16 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ ld(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ ld(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ ld(data,
+            FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ ld(data,
+            FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ ld(data,
+            FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -299,7 +308,8 @@
   __ li(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -383,8 +393,7 @@
   __ Branch(miss_label, ne, value_reg, Operand(scratch));
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -392,21 +401,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     __ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    Label do_store;
-    while (true) {
-      // Compare map directly within the Branch() functions.
-      __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
-      it.Advance();
-      if (it.Done()) {
-        __ Branch(miss_label, ne, map_reg, Operand(scratch));
-        break;
-      }
-      __ Branch(&do_store, eq, map_reg, Operand(scratch));
-    }
-    __ bind(&do_store);
+    // Compare map directly within the Branch() functions.
+    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
+    __ Branch(miss_label, ne, map_reg, Operand(scratch));
   }
 }
 
@@ -584,42 +583,51 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Build AccessorInfo::args_ list on the stack and push property name below
-  // the exit frame to make GC aware of them and store pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  DCHECK(!scratch2().is(reg));
-  DCHECK(!scratch3().is(reg));
-  DCHECK(!scratch4().is(reg));
-  __ push(receiver());
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+  // Here and below, the +1 accounts for name(), pushed after the args_ array.
+  typedef PropertyCallbackArguments PCA;
+  __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+  __ sd(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
-    __ li(scratch3(), data);
+    __ li(scratch2(), data);
   } else {
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
     // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch3(), cell);
+    __ GetWeakValue(scratch2(), cell);
   }
-  __ Dsubu(sp, sp, 6 * kPointerSize);
-  __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize));
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize));
-  __ sd(scratch3(), MemOperand(sp, 3 * kPointerSize));
-  __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
-  __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize));
-  __ sd(reg, MemOperand(sp, 1 * kPointerSize));
-  __ sd(name(), MemOperand(sp, 0 * kPointerSize));
-  __ Daddu(scratch2(), sp, 1 * kPointerSize);
+  __ sd(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+  __ sd(scratch2(),
+        MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+  __ sd(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+                                       kPointerSize));
+  __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  __ sd(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+  __ sd(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+  // should_throw_on_error -> false
+  DCHECK(Smi::FromInt(0) == nullptr);
+  __ sd(zero_reg,
+        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
 
-  __ mov(a2, scratch2());  // Saved in case scratch2 == a1.
+  __ sd(name(), MemOperand(sp, 0 * kPointerSize));
+
   // Abi for CallApiGetter.
   Register getter_address_reg = ApiGetterDescriptor::function_address();
 
@@ -705,8 +713,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ Push(receiver(), holder_reg);  // Receiver.
@@ -721,6 +729,7 @@
   __ push(at);
   __ li(at, Operand(name));
   __ Push(at, value());
+  __ Push(Smi::FromInt(language_mode));
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -769,7 +778,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, a1, a3);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index c5da5fb..f46c9dc 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -158,8 +158,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* slow,
-                                  LanguageMode language_mode) {
+                                  Register result, Label* slow) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -215,12 +214,8 @@
   __ Branch(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    __ Branch(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ Branch(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ Branch(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -269,8 +264,7 @@
   __ bind(&unique);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = a0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -284,7 +278,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -308,7 +302,7 @@
 
   DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);
 
   LoadIC_PushArgs(masm);
 
@@ -316,17 +310,14 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in ra.
 
   __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
   __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
@@ -336,7 +327,7 @@
 
   DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);
 
   LoadIC_PushArgs(masm);
 
@@ -344,21 +335,16 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in ra.
 
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in ra.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -382,9 +368,9 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(a0, a3, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow,
-                        language_mode);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
+  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
+                      a3);
   __ Ret();
 
   __ bind(&check_number_dictionary);
@@ -402,9 +388,9 @@
 
   // Slow case, key and receiver still in a2 and a1.
   __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
                       a3);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -448,8 +434,8 @@
   GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
   // Load the property to v0.
   GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
-                      a3);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+                      a4, a3);
   __ Ret();
 
   __ bind(&index_name);
@@ -787,11 +773,11 @@
 
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
index 4ab9f8e..0bd7dd0 100644
--- a/src/ic/mips64/stub-cache-mips64.cc
+++ b/src/ic/mips64/stub-cache-mips64.cc
@@ -42,13 +42,11 @@
   scratch = no_reg;
 
   // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ dsll(offset_scratch, offset, 1);
-  __ Daddu(offset_scratch, offset_scratch, offset);
+  __ Dlsa(offset_scratch, offset, offset, 1);
 
   // Calculate the base address of the entry.
   __ li(base_addr, Operand(key_offset));
-  __ dsll(at, offset_scratch, kPointerSizeLog2);
-  __ Daddu(base_addr, base_addr, at);
+  __ Dlsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
 
   // Check that the key in the entry matches the name.
   __ ld(at, MemOperand(base_addr, 0));
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index 8b48755..6e7d78a 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_PPC
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -284,12 +286,17 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ LoadP(data,
-             FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadP(data,
-             FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ LoadP(data,
-             FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ LoadP(data,
+               FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ LoadP(data,
+               FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ LoadP(data,
+               FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ LoadP(data,
+               FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -308,7 +315,8 @@
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -393,8 +401,7 @@
   __ bne(miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -402,20 +409,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    Label do_store;
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ bne(miss_label);
-        break;
-      }
-      __ beq(&do_store);
-    }
-    __ bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ bne(miss_label);
   }
 }
 
@@ -592,38 +590,40 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Build AccessorInfo::args_ list on the stack and push property name below
-  // the exit frame to make GC aware of them and store pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  DCHECK(!scratch2().is(reg));
-  DCHECK(!scratch3().is(reg));
-  DCHECK(!scratch4().is(reg));
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
   __ push(receiver());
-  // Push data from ExecutableAccessorInfo.
+  // Push data from AccessorInfo.
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
-    __ Move(scratch3(), data);
+    __ Move(scratch2(), data);
   } else {
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
     // so the weak cell is not cleared and points to data.
-    __ GetWeakValue(scratch3(), cell);
+    __ GetWeakValue(scratch2(), cell);
   }
-  __ push(scratch3());
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ mr(scratch4(), scratch3());
-  __ Push(scratch3(), scratch4());
-  __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch4(), reg);
-  __ push(name());
+  __ push(scratch2());
+  __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+  __ Push(scratch2(), scratch2());
+  __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  // should_throw_on_error -> false
+  __ mov(scratch3(), Operand(Smi::FromInt(0)));
+  __ Push(scratch2(), reg, scratch3(), name());
 
   // Abi for CallApiGetter
   Register getter_address_reg = ApiGetterDescriptor::function_address();
@@ -711,8 +711,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ Push(receiver(), holder_reg);  // receiver
@@ -728,6 +728,7 @@
   __ push(ip);
   __ mov(ip, Operand(name));
   __ Push(ip, value());
+  __ Push(Smi::FromInt(language_mode));
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -776,7 +777,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r4, r6);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 78daac2..567296c 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -163,8 +163,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* slow,
-                                  LanguageMode language_mode) {
+                                  Register result, Label* slow) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -221,13 +220,8 @@
   __ jmp(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ jmp(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ jmp(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -274,8 +268,7 @@
   __ bind(&unique);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = r3;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -290,7 +283,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -314,7 +307,7 @@
 
   DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);
+  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);
 
   LoadIC_PushArgs(masm);
 
@@ -322,17 +315,14 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
 
   __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
   __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
@@ -342,7 +332,7 @@
 
   DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                      LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);
 
   LoadIC_PushArgs(masm);
 
@@ -350,21 +340,16 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in lr.
 
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is in lr.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -388,9 +373,9 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(r3, r6, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow,
-                        language_mode);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
+  GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
+                      r6);
   __ Ret();
 
   __ bind(&check_number_dictionary);
@@ -409,9 +394,9 @@
 
   // Slow case, key and receiver still in r3 and r4.
   __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
                       r6);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
@@ -456,8 +441,8 @@
   GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
   // Load the property to r3.
   GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7,
-                      r6);
+  __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+                      r7, r6);
   __ Ret();
 
   __ bind(&index_name);
@@ -797,11 +782,11 @@
 
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index c09eca6..ac3dd9a 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -181,9 +183,16 @@
     call_data_undefined = true;
     __ LoadRoot(data, Heap::kUndefinedValueRootIndex);
   } else {
-    __ movp(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ movp(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ movp(data,
+              FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ movp(data,
+              FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ movp(data,
+              FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -200,7 +209,8 @@
           RelocInfo::EXTERNAL_REFERENCE);
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
 
@@ -395,8 +405,7 @@
   __ j(not_equal, miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -404,20 +413,12 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
+  if (field_type->IsClass()) {
     Label do_store;
     __ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ j(not_equal, miss_label);
-        break;
-      }
-      __ j(equal, &do_store, Label::kNear);
-    }
-    __ bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ j(not_equal, miss_label);
   }
 }
 
@@ -592,24 +593,29 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
-  // Insert additional parameters into the stack frame above return address.
-  DCHECK(!scratch4().is(reg));
-  __ PopReturnAddressTo(scratch4());
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), receiver()));
+  DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), reg));
 
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  // Insert additional parameters into the stack frame above the return address.
+  __ PopReturnAddressTo(scratch3());
+
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
   __ Push(receiver());  // receiver
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
     __ Push(data);
   } else {
-    DCHECK(!scratch2().is(reg));
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
@@ -617,17 +623,15 @@
     __ GetWeakValue(scratch2(), cell);
     __ Push(scratch2());
   }
-  DCHECK(!kScratchRegister.is(reg));
   __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
   __ Push(kScratchRegister);  // return value
   __ Push(kScratchRegister);  // return value default
   __ PushAddress(ExternalReference::isolate_address(isolate()));
   __ Push(reg);     // holder
-  __ Push(name());  // name
-  // Save a pointer to where we pushed the arguments pointer.  This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
+  __ Push(Smi::FromInt(0));  // should_throw_on_error -> false
 
-  __ PushReturnAddressFrom(scratch4());
+  __ Push(name());  // name
+  __ PushReturnAddressFrom(scratch3());
 
   // Abi for CallApiGetter
   Register api_function_address = ApiGetterDescriptor::function_address();
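
Unlike the MIPS/PPC paths, x64 finds the return address already on the
stack, so inserting the callback arguments "above" it means popping it to
a scratch register, pushing the arguments, and pushing it back. A minimal
stack model of that dance:

    #include <cstdint>
    #include <vector>

    struct StackSketch {
      std::vector<std::intptr_t> slots;
      void Push(std::intptr_t v) { slots.push_back(v); }
      std::intptr_t Pop() {
        std::intptr_t v = slots.back();
        slots.pop_back();
        return v;
      }
    };

    void InsertArgsAboveReturnAddress(StackSketch& s,
                                      const std::vector<std::intptr_t>& args) {
      std::intptr_t ret = s.Pop();             // PopReturnAddressTo(scratch3())
      for (std::intptr_t a : args) s.Push(a);  // receiver, data, ..., name
      s.Push(ret);                             // PushReturnAddressFrom(scratch3())
    }
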
@@ -722,8 +726,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ PopReturnAddressTo(scratch1());
@@ -739,6 +743,7 @@
   }
   __ Push(name);
   __ Push(value());
+  __ Push(Smi::FromInt(language_mode));
   __ PushReturnAddressFrom(scratch1());
 
   // Do tail-call to the runtime system.
@@ -794,7 +799,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
   }
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index bf4ad96..247116d 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -167,7 +167,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch, Register result,
-                                  Label* slow, LanguageMode language_mode) {
+                                  Label* slow) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -222,13 +222,8 @@
   __ jmp(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ jmp(slow);
-  } else {
-    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-    __ jmp(&done);
-  }
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -274,9 +269,7 @@
   __ bind(&unique);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is on the stack.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -298,10 +291,9 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(rax, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow,
-                        language_mode);
+  GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
   __ ret(0);
 
   __ bind(&check_number_dictionary);
@@ -319,8 +311,8 @@
 
   __ bind(&slow);
   // Slow case: Jump to runtime.
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  KeyedLoadIC::GenerateRuntimeGetProperty(masm, language_mode);
+  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
@@ -366,7 +358,7 @@
   GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
 
   GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
   __ ret(0);
 
   __ bind(&index_name);
@@ -626,8 +618,7 @@
   GenerateMiss(masm);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = rax;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -642,7 +633,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  LoadIC::GenerateRuntimeGetProperty(masm, language_mode);
+  LoadIC::GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -667,7 +658,7 @@
   // The return address is on the stack.
 
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->load_miss(), 1);
+  __ IncrementCounter(counters->ic_load_miss(), 1);
 
   LoadIC_PushArgs(masm);
 
@@ -675,9 +666,7 @@
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -690,15 +679,14 @@
   __ PushReturnAddressFrom(rbx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is on the stack.
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_miss(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_miss(), 1);
 
   LoadIC_PushArgs(masm);
 
@@ -706,9 +694,7 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -721,8 +707,7 @@
   __ PushReturnAddressFrom(rbx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
 
@@ -774,11 +759,11 @@
   __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
   GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
   __ ret(0);
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
   GenerateMiss(masm);
 }
 
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index cc43ed2..1b25f06 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -4,8 +4,10 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
 #include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 
@@ -197,9 +199,13 @@
     call_data_undefined = true;
     __ mov(data, Immediate(isolate->factory()->undefined_value()));
   } else {
-    __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
-    __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    if (optimization.is_constant_call()) {
+      __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+      __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+      __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+    } else {
+      __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+    }
     __ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
   }
 
@@ -214,7 +220,8 @@
   __ mov(api_function_address, Immediate(function_address));
 
   // Jump to stub.
-  CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+  CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+                           !optimization.is_constant_call());
   __ TailCallStub(&stub);
 }
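
Note: the new is_constant_call() branch distinguishes two shapes of callee: a known JSFunction, whose call data is reached through its SharedFunctionInfo, versus a callee that already is the FunctionTemplateInfo. A toy model of the two paths; only the field names echo the hunk, the struct layout is invented for illustration:

    // Toy model of the branch above. Not V8 code.
    #include <cstdio>

    struct CallHandlerInfo { const char* data; };
    struct FunctionTemplateInfo { CallHandlerInfo call_code; };
    struct SharedFunctionInfo { FunctionTemplateInfo function_data; };
    struct JSFunction { SharedFunctionInfo shared; };

    const char* LoadCallData(const void* callee, bool is_constant_call) {
      if (is_constant_call) {
        // Known function: callee -> shared info -> template -> call data.
        auto* fn = static_cast<const JSFunction*>(callee);
        return fn->shared.function_data.call_code.data;
      }
      // Unknown function: the callee already is the template.
      auto* tmpl = static_cast<const FunctionTemplateInfo*>(callee);
      return tmpl->call_code.data;
    }

    int main() {
      JSFunction f{{{{"data via SharedFunctionInfo"}}}};
      FunctionTemplateInfo t{{"data via FunctionTemplateInfo"}};
      std::printf("%s / %s\n", LoadCallData(&f, true), LoadCallData(&t, false));
      return 0;
    }
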
 
@@ -399,8 +406,7 @@
   __ j(not_equal, miss_label);
 }
 
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
                                                         Register value_reg,
                                                         Label* miss_label) {
   Register map_reg = scratch1();
@@ -408,20 +414,11 @@
   DCHECK(!value_reg.is(map_reg));
   DCHECK(!value_reg.is(scratch));
   __ JumpIfSmi(value_reg, miss_label);
-  HeapType::Iterator<Map> it = field_type->Classes();
-  if (!it.Done()) {
-    Label do_store;
+  if (field_type->IsClass()) {
     __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    while (true) {
-      __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
-      it.Advance();
-      if (it.Done()) {
-        __ j(not_equal, miss_label);
-        break;
-      }
-      __ j(equal, &do_store, Label::kNear);
-    }
-    __ bind(&do_store);
+    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+                    scratch);
+    __ j(not_equal, miss_label);
   }
 }
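
Note: GenerateFieldTypeChecks simplifies because a FieldType, unlike the old HeapType, is Any, None, or exactly one class, so the iterator loop collapses to a single map compare (the real code compares through a weak cell to stay GC-safe). A standalone reduction of the same logic:

    // Standalone reduction of the check above. Not V8 code.
    #include <cstdio>

    struct Map { int id; };

    struct FieldType {
      const Map* clazz;  // null models Any/None: nothing to enforce
      bool IsClass() const { return clazz != nullptr; }
      const Map* AsClass() const { return clazz; }
    };

    bool FieldTypeCheckPasses(const FieldType& type, const Map* value_map) {
      if (!type.IsClass()) return true;
      return type.AsClass() == value_map;  // one compare; mismatch -> miss
    }

    int main() {
      Map a{1}, b{2};
      FieldType only_a{&a};
      std::printf("%d %d\n", FieldTypeCheckPasses(only_a, &a),
                  FieldTypeCheckPasses(only_a, &b));
      return 0;
    }
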
 
@@ -593,24 +590,30 @@
 
 
 void NamedLoadHandlerCompiler::GenerateLoadCallback(
-    Register reg, Handle<ExecutableAccessorInfo> callback) {
+    Register reg, Handle<AccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
+  DCHECK(!AreAliased(scratch2(), scratch3(), reg));
+
   // Insert additional parameters into the stack frame above return address.
-  DCHECK(!scratch3().is(reg));
   __ pop(scratch3());  // Get return address to place it below.
 
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+  // name below the exit frame to make GC aware of them.
+  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
   __ push(receiver());  // receiver
-  // Push data from ExecutableAccessorInfo.
+  // Push data from AccessorInfo.
   Handle<Object> data(callback->data(), isolate());
   if (data->IsUndefined() || data->IsSmi()) {
     __ push(Immediate(data));
   } else {
-    DCHECK(!scratch2().is(reg));
     Handle<WeakCell> cell =
         isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
     // The callback is alive if this instruction is executed,
@@ -623,13 +626,9 @@
   __ push(Immediate(isolate()->factory()->undefined_value()));
   __ push(Immediate(reinterpret_cast<int>(isolate())));
   __ push(reg);  // holder
-
-  // Save a pointer to where we pushed the arguments. This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
-  __ push(esp);
+  __ push(Immediate(Smi::FromInt(0)));  // should_throw_on_error -> false
 
   __ push(name());  // name
-
   __ push(scratch3());  // Restore return address.
 
   // Abi for CallApiGetter
@@ -731,8 +730,8 @@
 
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
-    Handle<JSObject> object, Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
+    Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+    LanguageMode language_mode) {
   Register holder_reg = Frontend(name);
 
   __ pop(scratch1());  // remove the return address
@@ -748,6 +747,7 @@
   }
   __ Push(name);
   __ push(value());
+  __ push(Immediate(Smi::FromInt(language_mode)));
   __ push(scratch1());  // restore return address
 
   // Do tail-call to the runtime system.
@@ -802,7 +802,7 @@
   }
 
   Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
   if (IC::ICUseVector(kind())) {
     DiscardVectorAndSlot();
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index d4cc3ce..5bbd9c5 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -167,7 +167,7 @@
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register scratch,
                                   Register scratch2, Register result,
-                                  Label* slow, LanguageMode language_mode) {
+                                  Label* slow) {
   // Register use:
   //   receiver - holds the receiver and is unchanged.
   //   key - holds the key and is unchanged (must be a smi).
@@ -211,13 +211,8 @@
   __ jmp(&check_next_prototype);
 
   __ bind(&absent);
-  if (is_strong(language_mode)) {
-    // Strong mode accesses must throw in this case, so call the runtime.
-    __ jmp(slow);
-  } else {
-    __ mov(result, masm->isolate()->factory()->undefined_value());
-    __ jmp(&done);
-  }
+  __ mov(result, masm->isolate()->factory()->undefined_value());
+  __ jmp(&done);
 
   __ bind(&in_bounds);
   // Fast case: Do the load.
@@ -262,9 +257,7 @@
   __ bind(&unique);
 }
 
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
-                                      LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // The return address is on the stack.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -286,11 +279,10 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(eax, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
-                        language_mode);
+  GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
   Isolate* isolate = masm->isolate();
   Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
   __ ret(0);
 
   __ bind(&check_number_dictionary);
@@ -318,8 +310,8 @@
 
   __ bind(&slow);
   // Slow case: jump to runtime.
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+  GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -363,7 +355,7 @@
   GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
 
   GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
   __ ret(0);
 
   __ bind(&index_name);
@@ -628,8 +620,7 @@
   GenerateMiss(masm);
 }
 
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
   Register dictionary = eax;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -644,7 +635,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm, language_mode);
+  GenerateRuntimeGetProperty(masm);
 }
 
 
@@ -668,16 +659,14 @@
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                        LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -689,14 +678,13 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
-                                              : Runtime::kGetProperty);
+  __ TailCallRuntime(Runtime::kGetProperty);
 }
 
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
 
   LoadIC_PushArgs(masm);
 
@@ -704,9 +692,7 @@
   __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
-                                             LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
@@ -718,8 +704,7 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
-                                              : Runtime::kKeyedGetProperty);
+  __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }
 
 
@@ -777,14 +762,14 @@
                           receiver, edi);
   __ Drop(3);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
   __ ret(0);
 
   __ bind(&restore_miss);
   __ pop(slot);
   __ pop(vector);
   __ pop(receiver);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
+  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
   GenerateMiss(masm);
 }
 
diff --git a/src/identity-map.cc b/src/identity-map.cc
index 723cdfa..97b70ae 100644
--- a/src/identity-map.cc
+++ b/src/identity-map.cc
@@ -4,7 +4,7 @@
 
 #include "src/identity-map.h"
 
-#include "src/heap/heap.h"
+#include "src/base/functional.h"
 #include "src/heap/heap-inl.h"
 #include "src/zone-containers.h"
 
@@ -14,10 +14,17 @@
 static const int kInitialIdentityMapSize = 4;
 static const int kResizeFactor = 4;
 
-IdentityMapBase::~IdentityMapBase() {
-  if (keys_) heap_->UnregisterStrongRoots(keys_);
-}
+IdentityMapBase::~IdentityMapBase() { Clear(); }
 
+void IdentityMapBase::Clear() {
+  if (keys_) {
+    heap_->UnregisterStrongRoots(keys_);
+    keys_ = nullptr;
+    values_ = nullptr;
+    size_ = 0;
+    mask_ = 0;
+  }
+}
 
 IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
   int index = LookupIndex(key);
@@ -35,8 +42,7 @@
 int IdentityMapBase::Hash(Object* address) {
   CHECK_NE(address, heap_->not_mapped_symbol());
   uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
-  // Xor some of the upper bits, since the lower 2 or 3 are usually aligned.
-  return static_cast<int>((raw_address >> 11) ^ raw_address);
+  return static_cast<int>(hasher_(raw_address));
 }
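
Note: Hash() drops the hand-rolled shift-xor in favor of base::hash<uintptr_t>. The old scheme mixes word-aligned addresses poorly, so small tables see clustering; a strong integer mixer spreads them across buckets. The stand-in below uses a Murmur3-style finalizer purely as an assumption of what a good mixer does, not as base::hash's actual algorithm:

    // Bucket spread: old shift-xor vs. a strong mixer, on aligned pointers.
    #include <cstdint>
    #include <cstdio>

    int OldHash(uintptr_t a) {        // previous scheme from the hunk above
      return static_cast<int>((a >> 11) ^ a);
    }

    int NewStyleHash(uintptr_t a) {   // stand-in for base::hash<uintptr_t>
      uint64_t h = a;
      h ^= h >> 33; h *= 0xff51afd7ed558ccdULL;
      h ^= h >> 33; h *= 0xc4ceb9fe1a85ec53ULL;
      h ^= h >> 33;
      return static_cast<int>(h);
    }

    int main() {
      // 8-byte-aligned addresses into a 16-bucket table: the old hash
      // lands them in very few buckets, the mixer spreads them out.
      for (uintptr_t p = 0x1000; p < 0x1000 + 64; p += 8)
        std::printf("old %2d  new %2d\n", OldHash(p) & 15, NewStyleHash(p) & 15);
      return 0;
    }
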
 
 
diff --git a/src/identity-map.h b/src/identity-map.h
index 2c4a0f3..ad2a260 100644
--- a/src/identity-map.h
+++ b/src/identity-map.h
@@ -5,6 +5,7 @@
 #ifndef V8_IDENTITY_MAP_H_
 #define V8_IDENTITY_MAP_H_
 
+#include "src/base/functional.h"
 #include "src/handles.h"
 
 namespace v8 {
@@ -36,6 +37,7 @@
 
   RawEntry GetEntry(Object* key);
   RawEntry FindEntry(Object* key);
+  void Clear();
 
  private:
   // Internal implementation should not be called directly by subclasses.
@@ -47,6 +49,7 @@
   RawEntry Insert(Object* key);
   int Hash(Object* address);
 
+  base::hash<uintptr_t> hasher_;
   Heap* heap_;
   Zone* zone_;
   int gc_counter_;
@@ -85,6 +88,9 @@
   // Set the value for the given key.
   void Set(Handle<Object> key, V v) { Set(*key, v); }
   void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
+
+  // Removes all elements from the map.
+  void Clear() { IdentityMapBase::Clear(); }
 };
 }  // namespace internal
 }  // namespace v8
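
Note: the destructor now funnels through the new Clear(), which unregisters the keys array from the strong-roots list and drops the backing storage. A toy model of that contract, with the real operations noted in comments:

    // Toy model of the Clear() contract. Not V8 code.
    #include <cassert>
    #include <vector>

    class ToyIdentityMap {
     public:
      void Set(void* key, int value) {
        keys_.push_back(key);
        values_.push_back(value);
        registered_as_roots_ = true;
      }
      void Clear() {
        if (!keys_.empty()) {
          registered_as_roots_ = false;  // ~ heap_->UnregisterStrongRoots(keys_)
          keys_.clear();                 // ~ keys_ = nullptr
          values_.clear();               // ~ values_ = nullptr, size_ = mask_ = 0
        }
      }
      bool registered_as_roots() const { return registered_as_roots_; }

     private:
      std::vector<void*> keys_;
      std::vector<int> values_;
      bool registered_as_roots_ = false;
    };

    int main() {
      ToyIdentityMap map;
      int dummy = 0;
      map.Set(&dummy, 42);
      map.Clear();  // also what the destructor now does
      assert(!map.registered_as_roots());
      return 0;
    }
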
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index 94ed702..cc46a56 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -31,12 +31,12 @@
 }
 }  // namespace
 
-
-Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
+FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
     Isolate* isolate, int parameter_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function = Type::FunctionType::New(
-      AnyTagged(zone), Type::Undefined(), parameter_count, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), parameter_count, zone)
+          ->AsFunction();
   while (parameter_count-- != 0) {
     function->InitParameter(parameter_count, AnyTagged(zone));
   }
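
Note: the recurring mechanical change in this file is Type::FunctionType::New(...) becoming Type::Function(...)->AsFunction(): a factory on the base type plus a downcast. A minimal stand-in hierarchy (not the real v8 Type system) showing why the rewritten call sites read the way they do:

    // Minimal stand-in hierarchy. Not V8 code.
    #include <cassert>
    #include <vector>

    struct Type {
      virtual ~Type() = default;
      struct FunctionType;
      FunctionType* AsFunction();                  // downcast accessor
      static Type* Function(int parameter_count);  // factory on the base
    };

    struct Type::FunctionType : Type {
      explicit FunctionType(int n) : params(n) {}
      void InitParameter(int i, int kind) { params[i] = kind; }
      std::vector<int> params;
    };

    Type::FunctionType* Type::AsFunction() {
      return static_cast<FunctionType*>(this);
    }

    Type* Type::Function(int parameter_count) {
      return new FunctionType(parameter_count);
    }

    int main() {
      // Shape of the new call sites: factory, then downcast, then init.
      auto* function = Type::Function(3)->AsFunction();
      for (int i = 0; i < 3; ++i) function->InitParameter(i, /*AnyTagged=*/0);
      assert(function->params.size() == 3);
      delete function;
      return 0;
    }
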
@@ -86,12 +86,11 @@
   data->InitializePlatformSpecific(0, nullptr);
 }
 
-
-Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, AnyTagged(zone));
   function->InitParameter(2, SmiType(zone));
@@ -136,13 +135,12 @@
   }
 }
 
-
-Type::FunctionType*
+FunctionType*
 StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));  // Receiver
   function->InitParameter(1, AnyTagged(zone));  // Name
   function->InitParameter(2, AnyTagged(zone));  // Value
@@ -150,13 +148,12 @@
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
   function->InitParameter(0, UntaggedIntegral32(zone));
   return function;
 }
@@ -168,13 +165,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
+FunctionType*
 StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
   function->InitParameter(0, UntaggedIntegral32(zone));
   function->InitParameter(1, AnyTagged(zone));
   return function;
@@ -216,6 +212,13 @@
 }
 
 
+void ToNameDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ReceiverRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
 void ToObjectDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {ReceiverRegister()};
@@ -236,13 +239,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
+FunctionType*
 LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, AnyTagged(zone));
   function->InitParameter(2, SmiType(zone));
@@ -258,15 +260,15 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
+FunctionType*
 VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
   bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
   int arg_count = has_slot ? 6 : 5;
-  Type::FunctionType* function = Type::FunctionType::New(
-      AnyTagged(zone), Type::Undefined(), arg_count, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), arg_count, zone)
+          ->AsFunction();
   int index = 0;
   function->InitParameter(index++, AnyTagged(zone));  // receiver
   function->InitParameter(index++, AnyTagged(zone));  // name
@@ -279,13 +281,11 @@
   return function;
 }
 
-
-Type::FunctionType*
-VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, AnyTagged(zone));
   function->InitParameter(2, AnyTagged(zone));
@@ -302,13 +302,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
+FunctionType*
 VectorStoreICTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, AnyTagged(zone));
   function->InitParameter(2, AnyTagged(zone));
@@ -324,13 +323,11 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
-ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
   function->InitParameter(0, ExternalPointer(zone));
   return function;
 }
@@ -343,54 +340,6 @@
 }
 
 
-void ArgumentsAccessReadDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {index(), parameter_count()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-Type::FunctionType*
-ArgumentsAccessNewDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
-  function->InitParameter(0, AnyTagged(zone));
-  function->InitParameter(1, SmiType(zone));
-  function->InitParameter(2, ExternalPointer(zone));
-  return function;
-}
-
-
-void ArgumentsAccessNewDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {function(), parameter_count(), parameter_pointer()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-Type::FunctionType*
-RestParamAccessDescriptor::BuildCallInterfaceDescriptorFunctionType(
-    Isolate* isolate, int paramater_count) {
-  Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
-  function->InitParameter(0, SmiType(zone));
-  function->InitParameter(1, ExternalPointer(zone));
-  function->InitParameter(2, SmiType(zone));
-  return function;
-}
-
-
-void RestParamAccessDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {parameter_count(), parameter_pointer(),
-                          rest_parameter_index()};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void ContextOnlyDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   data->InitializePlatformSpecific(0, nullptr);
@@ -403,13 +352,12 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-Type::FunctionType*
+FunctionType*
 FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));  // closure
   function->InitParameter(1, SmiType(zone));    // literal_index
   function->InitParameter(2, AnyTagged(zone));  // pattern
@@ -417,63 +365,57 @@
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, SmiType(zone));
   function->InitParameter(2, AnyTagged(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, SmiType(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));
   function->InitParameter(1, SmiType(zone));
   function->InitParameter(2, AnyTagged(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));           // target
   function->InitParameter(1, UntaggedIntegral32(zone));  // actual #arguments
   return function;
 }
 
-
-Type::FunctionType*
-ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));           // target
   function->InitParameter(1, AnyTagged(zone));           // new.target
   function->InitParameter(2, UntaggedIntegral32(zone));  // actual #arguments
@@ -481,76 +423,70 @@
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));           // target
   function->InitParameter(1, AnyTagged(zone));           // new.target
   function->InitParameter(2, UntaggedIntegral32(zone));  // actual #arguments
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());  // JSFunction
   function->InitParameter(1, SmiType(zone));
   return function;
 }
 
-
-Type::FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
+FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
     BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
                                              int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());  // JSFunction
   function->InitParameter(1, SmiType(zone));
   function->InitParameter(2, AnyTagged(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());  // JSFunction
   function->InitParameter(1, AnyTagged(zone));
   function->InitParameter(2, UntaggedIntegral32(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());  // JSFunction
   function->InitParameter(1, UntaggedIntegral32(zone));
   return function;
 }
 
-
-Type::FunctionType*
+FunctionType*
 ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, Type::Receiver());          // JSFunction
   function->InitParameter(1, AnyTagged(zone));           // the new target
   function->InitParameter(2, UntaggedIntegral32(zone));  // actual #arguments
@@ -558,13 +494,11 @@
   return function;
 }
 
-
-Type::FunctionType*
-ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));           // callee
   function->InitParameter(1, AnyTagged(zone));           // call_data
   function->InitParameter(2, AnyTagged(zone));           // holder
@@ -573,13 +507,11 @@
   return function;
 }
 
-
-Type::FunctionType*
-ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
     Isolate* isolate, int paramater_count) {
   Zone* zone = isolate->interface_descriptor_zone();
-  Type::FunctionType* function =
-      Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
   function->InitParameter(0, AnyTagged(zone));        // callee
   function->InitParameter(1, AnyTagged(zone));        // call_data
   function->InitParameter(2, AnyTagged(zone));        // holder
@@ -587,6 +519,19 @@
   return function;
 }
 
+FunctionType*
+InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
+    Isolate* isolate, int parameter_count) {
+  Zone* zone = isolate->interface_descriptor_zone();
+  FunctionType* function =
+      Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
+  function->InitParameter(kAccumulatorParameter, AnyTagged(zone));
+  function->InitParameter(kRegisterFileParameter, ExternalPointer(zone));
+  function->InitParameter(kBytecodeOffsetParameter, UntaggedIntegral32(zone));
+  function->InitParameter(kBytecodeArrayParameter, AnyTagged(zone));
+  function->InitParameter(kDispatchTableParameter, AnyTagged(zone));
+  return function;
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index 2814dae..fb1969d 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -25,9 +25,14 @@
   V(LoadWithVector)                           \
   V(FastNewClosure)                           \
   V(FastNewContext)                           \
+  V(FastNewObject)                            \
+  V(FastNewRestParameter)                     \
+  V(FastNewSloppyArguments)                   \
+  V(FastNewStrictArguments)                   \
   V(ToNumber)                                 \
   V(ToLength)                                 \
   V(ToString)                                 \
+  V(ToName)                                   \
   V(ToObject)                                 \
   V(NumberToString)                           \
   V(Typeof)                                   \
@@ -66,21 +71,17 @@
   V(ApiFunction)                              \
   V(ApiAccessor)                              \
   V(ApiGetter)                                \
-  V(ArgumentsAccessRead)                      \
-  V(ArgumentsAccessNew)                       \
-  V(RestParamAccess)                          \
-  V(StoreArrayLiteralElement)                 \
   V(LoadGlobalViaContext)                     \
   V(StoreGlobalViaContext)                    \
   V(MathPowTagged)                            \
   V(MathPowInteger)                           \
   V(ContextOnly)                              \
   V(GrowArrayElements)                        \
+  V(InterpreterDispatch)                      \
   V(InterpreterPushArgsAndCall)               \
   V(InterpreterPushArgsAndConstruct)          \
   V(InterpreterCEntry)
 
-
 class CallInterfaceDescriptorData {
  public:
   CallInterfaceDescriptorData()
@@ -89,7 +90,7 @@
   // A copy of the passed in registers and param_representations is made
   // and owned by the CallInterfaceDescriptorData.
 
-  void InitializePlatformIndependent(Type::FunctionType* function_type) {
+  void InitializePlatformIndependent(FunctionType* function_type) {
     function_type_ = function_type;
   }
 
@@ -112,7 +113,7 @@
     return platform_specific_descriptor_;
   }
 
-  Type::FunctionType* function_type() const { return function_type_; }
+  FunctionType* function_type() const { return function_type_; }
 
  private:
   int register_param_count_;
@@ -124,7 +125,7 @@
   base::SmartArrayPointer<Register> register_params_;
 
   // Specifies types for parameters and return
-  Type::FunctionType* function_type_;
+  FunctionType* function_type_;
 
   PlatformInterfaceDescriptor* platform_specific_descriptor_;
 
@@ -175,21 +176,19 @@
     return data()->platform_specific_descriptor();
   }
 
-  Type::FunctionType* GetFunctionType() const {
-    return data()->function_type();
-  }
+  FunctionType* GetFunctionType() const { return data()->function_type(); }
 
   static const Register ContextRegister();
 
   const char* DebugName(Isolate* isolate) const;
 
-  static Type::FunctionType* BuildDefaultFunctionType(Isolate* isolate,
-                                                      int paramater_count);
+  static FunctionType* BuildDefaultFunctionType(Isolate* isolate,
+                                                int paramater_count);
 
  protected:
   const CallInterfaceDescriptorData* data() const { return data_; }
 
-  virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType(
+  virtual FunctionType* BuildCallInterfaceDescriptorFunctionType(
       Isolate* isolate, int register_param_count) {
     return BuildDefaultFunctionType(isolate, register_param_count);
   }
@@ -202,9 +201,8 @@
     if (!data()->IsInitialized()) {
       CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
       InitializePlatformSpecific(d);
-      Type::FunctionType* function_type =
-          BuildCallInterfaceDescriptorFunctionType(isolate,
-                                                   d->register_param_count());
+      FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
+          isolate, d->register_param_count());
       d->InitializePlatformIndependent(function_type);
     }
   }
@@ -226,16 +224,14 @@
  public:                                                                       \
   static inline CallDescriptors::Key key();
 
-
 #define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
   DECLARE_DESCRIPTOR(name, base)                                 \
  protected:                                                      \
-  Type::FunctionType* BuildCallInterfaceDescriptorFunctionType(  \
+  FunctionType* BuildCallInterfaceDescriptorFunctionType(        \
       Isolate* isolate, int register_param_count) override;      \
                                                                  \
  public:
 
-
 class VoidDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -379,6 +375,28 @@
   DECLARE_DESCRIPTOR(FastNewContextDescriptor, CallInterfaceDescriptor)
 };
 
+class FastNewObjectDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
+};
+
+class FastNewRestParameterDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewRestParameterDescriptor, CallInterfaceDescriptor)
+};
+
+class FastNewSloppyArgumentsDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewSloppyArgumentsDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewStrictArgumentsDescriptor,
+                     CallInterfaceDescriptor)
+};
+
 
 class ToNumberDescriptor : public CallInterfaceDescriptor {
  public:
@@ -406,6 +424,16 @@
 };
 
 
+class ToNameDescriptor : public CallInterfaceDescriptor {
+ public:
+  enum ParameterIndices { kReceiverIndex };
+
+  DECLARE_DESCRIPTOR(ToNameDescriptor, CallInterfaceDescriptor)
+
+  static const Register ReceiverRegister();
+};
+
+
 class ToObjectDescriptor : public CallInterfaceDescriptor {
  public:
   enum ParameterIndices { kReceiverIndex };
@@ -692,43 +720,6 @@
 };
 
 
-class ArgumentsAccessReadDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(ArgumentsAccessReadDescriptor, CallInterfaceDescriptor)
-
-  static const Register index();
-  static const Register parameter_count();
-};
-
-
-class ArgumentsAccessNewDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentsAccessNewDescriptor,
-                                               CallInterfaceDescriptor)
-
-  static const Register function();
-  static const Register parameter_count();
-  static const Register parameter_pointer();
-};
-
-
-class RestParamAccessDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RestParamAccessDescriptor,
-                                               CallInterfaceDescriptor)
-  static const Register parameter_count();
-  static const Register parameter_pointer();
-  static const Register rest_parameter_index();
-};
-
-
-class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
-                     CallInterfaceDescriptor)
-};
-
-
 class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
@@ -760,6 +751,18 @@
   static const Register KeyRegister();
 };
 
+class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
+                                               CallInterfaceDescriptor)
+
+  static const int kAccumulatorParameter = 0;
+  static const int kRegisterFileParameter = 1;
+  static const int kBytecodeOffsetParameter = 2;
+  static const int kBytecodeArrayParameter = 3;
+  static const int kDispatchTableParameter = 4;
+  static const int kContextParameter = 5;
+};
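
Note: for reference, the five parameters registered for InterpreterDispatch in the .cc hunk above, by kind. kContextParameter = 5 is declared here but lies outside the five-entry function type, presumably because the context travels implicitly. A trivial transcription:

    // Parameter kinds, transcribed from the hunk above. Not V8 code.
    #include <cstdio>

    enum DispatchParam {
      kAccumulatorParameter = 0,     // AnyTagged
      kRegisterFileParameter = 1,    // ExternalPointer
      kBytecodeOffsetParameter = 2,  // UntaggedIntegral32
      kBytecodeArrayParameter = 3,   // AnyTagged
      kDispatchTableParameter = 4,   // AnyTagged
    };

    int main() {
      const char* kinds[] = {"AnyTagged", "ExternalPointer",
                             "UntaggedIntegral32", "AnyTagged", "AnyTagged"};
      for (int i = kAccumulatorParameter; i <= kDispatchTableParameter; ++i)
        std::printf("param %d: %s\n", i, kinds[i]);
      return 0;
    }
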
 
 class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
  public:
@@ -781,7 +784,6 @@
   DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
 };
 
-
 #undef DECLARE_DESCRIPTOR
 
 
diff --git a/src/interpreter/DEPS b/src/interpreter/DEPS
deleted file mode 100644
index f8d6b98..0000000
--- a/src/interpreter/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
-  "+src/compiler/interpreter-assembler.h",
-]
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 1b15fc6..7103c72 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -3,12 +3,13 @@
 // found in the LICENSE file.
 
 #include "src/interpreter/bytecode-array-builder.h"
+#include "src/compiler.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-class BytecodeArrayBuilder::PreviousBytecodeHelper {
+class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
  public:
   explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
       : array_builder_(array_builder),
@@ -37,9 +38,9 @@
         Bytecodes::GetOperandOffset(bytecode, operand_index);
     OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
     switch (size) {
-      default:
       case OperandSize::kNone:
         UNREACHABLE();
+        break;
       case OperandSize::kByte:
         return static_cast<uint32_t>(
             array_builder_.bytecodes()->at(operand_offset));
@@ -49,6 +50,7 @@
             array_builder_.bytecodes()->at(operand_offset + 1);
         return static_cast<uint32_t>(operand);
     }
+    return 0;
   }
 
   Handle<Object> GetConstantForIndexOperand(int operand_index) const {
@@ -63,43 +65,31 @@
   DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
 };
 
-
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
+                                           int parameter_count,
+                                           int context_count, int locals_count)
     : isolate_(isolate),
       zone_(zone),
       bytecodes_(zone),
       bytecode_generated_(false),
       constant_array_builder_(isolate, zone),
+      handler_table_builder_(isolate, zone),
+      source_position_table_builder_(isolate, zone),
       last_block_end_(0),
       last_bytecode_start_(~0),
       exit_seen_in_block_(false),
       unbound_jumps_(0),
-      parameter_count_(-1),
-      local_register_count_(-1),
-      context_register_count_(-1),
-      temporary_register_count_(0),
-      free_temporaries_(zone) {}
-
-
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
-
-void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
-  local_register_count_ = number_of_locals;
-  DCHECK_LE(context_register_count_, 0);
-}
-
-
-void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
-  parameter_count_ = number_of_parameters;
-}
-
-
-void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
-  context_register_count_ = number_of_contexts;
+      parameter_count_(parameter_count),
+      local_register_count_(locals_count),
+      context_register_count_(context_count),
+      temporary_allocator_(zone, fixed_register_count()),
+      register_translator_(this) {
+  DCHECK_GE(parameter_count_, 0);
+  DCHECK_GE(context_register_count_, 0);
   DCHECK_GE(local_register_count_, 0);
 }
 
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
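
Note: the parameter/context/locals counts move from post-construction setters into the constructor, so a builder's register layout is fixed and DCHECK-validated up front. A plain-C++ model of that shift; fixed_register_count() is modeled as contexts plus locals, as in the builder:

    // Constructor injection replacing mutable set_*_count setters.
    #include <cassert>

    class ToyBuilder {
     public:
      ToyBuilder(int parameter_count, int context_count, int locals_count)
          : parameter_count_(parameter_count),
            context_count_(context_count),
            locals_count_(locals_count) {
        // The DCHECK_GEs from the hunk, as hard asserts.
        assert(parameter_count_ >= 0);
        assert(context_count_ >= 0);
        assert(locals_count_ >= 0);
      }
      int parameter_count() const { return parameter_count_; }
      int fixed_register_count() const { return context_count_ + locals_count_; }

     private:
      const int parameter_count_;
      const int context_count_;
      const int locals_count_;
    };

    int main() {
      ToyBuilder builder(/*parameters=*/2, /*contexts=*/1, /*locals=*/4);
      assert(builder.parameter_count() == 2);
      assert(builder.fixed_register_count() == 5);
      return 0;
    }
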
 
 Register BytecodeArrayBuilder::first_context_register() const {
   DCHECK_GT(context_register_count_, 0);
@@ -113,18 +103,6 @@
 }
 
 
-Register BytecodeArrayBuilder::first_temporary_register() const {
-  DCHECK_GT(temporary_register_count_, 0);
-  return Register(fixed_register_count());
-}
-
-
-Register BytecodeArrayBuilder::last_temporary_register() const {
-  DCHECK_GT(temporary_register_count_, 0);
-  return Register(fixed_register_count() + temporary_register_count_ - 1);
-}
-
-
 Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
   DCHECK_GE(parameter_index, 0);
   return Register::FromParameterIndex(parameter_index, parameter_count());
@@ -136,25 +114,23 @@
 }
 
 
-bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
-  return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
-         reg <= last_temporary_register();
-}
-
-
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
   DCHECK_EQ(bytecode_generated_, false);
-  EnsureReturn();
+  DCHECK(exit_seen_in_block_);
 
   int bytecode_size = static_cast<int>(bytecodes_.size());
-  int register_count = fixed_register_count() + temporary_register_count_;
+  int register_count =
+      fixed_and_temporary_register_count() + translation_register_count();
   int frame_size = register_count * kPointerSize;
-  Factory* factory = isolate_->factory();
-  Handle<FixedArray> constant_pool =
-      constant_array_builder()->ToFixedArray(factory);
-  Handle<BytecodeArray> output =
-      factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
-                                parameter_count(), constant_pool);
+  Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
+  Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
+  Handle<FixedArray> source_position_table =
+      source_position_table_builder()->ToFixedArray();
+  Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+      bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
+      constant_pool);
+  output->set_handler_table(*handler_table);
+  output->set_source_position_table(*source_position_table);
   bytecode_generated_ = true;
   return output;
 }
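
Note: ToBytecodeArray() now also materializes the handler table and the source position table and attaches both to the finished array. A toy model of the extra outputs:

    // Toy model of the extra outputs. Not V8 code.
    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct ToyBytecodeArray {
      std::vector<uint8_t> bytecodes;
      std::vector<int> handler_table;
      std::vector<int> source_position_table;
    };

    ToyBytecodeArray ToBytecodeArray(std::vector<uint8_t> bytecodes,
                                     std::vector<int> handlers,
                                     std::vector<int> positions) {
      ToyBytecodeArray out;
      out.bytecodes = std::move(bytecodes);
      out.handler_table = std::move(handlers);           // ~ set_handler_table
      out.source_position_table = std::move(positions);  // ~ set_source_position_table
      return out;
    }

    int main() {
      ToyBytecodeArray array = ToBytecodeArray({0x01, 0x02}, {7}, {0, 42});
      assert(array.handler_table.size() == 1);
      assert(array.source_position_table.size() == 2);
      return 0;
    }
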
@@ -163,16 +139,28 @@
 template <size_t N>
 void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
   // Don't output dead code.
-  if (exit_seen_in_block_) return;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return;
+  }
 
-  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
+  int operand_count = static_cast<int>(N);
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+
+  int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
+  if (register_operand_count > 0) {
+    register_translator()->TranslateInputRegisters(bytecode, operands,
+                                                   operand_count);
+  }
+
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
-  for (int i = 0; i < static_cast<int>(N); i++) {
+  for (int i = 0; i < operand_count; i++) {
     DCHECK(OperandIsValid(bytecode, i, operands[i]));
     switch (Bytecodes::GetOperandSize(bytecode, i)) {
       case OperandSize::kNone:
         UNREACHABLE();
+        break;
       case OperandSize::kByte:
         bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
         break;
@@ -185,6 +173,10 @@
       }
     }
   }
+
+  if (register_operand_count > 0) {
+    register_translator()->TranslateOutputRegisters();
+  }
 }
 
 
@@ -218,32 +210,23 @@
 
 void BytecodeArrayBuilder::Output(Bytecode bytecode) {
   // Don't output dead code.
-  if (exit_seen_in_block_) return;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return;
+  }
 
   DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
   last_bytecode_start_ = bytecodes()->size();
   bytecodes()->push_back(Bytecodes::ToByte(bytecode));
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
-                                                            Register reg,
-                                                            Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
-  Output(BytecodeForBinaryOperation(op), reg.ToOperand());
+                                                            Register reg) {
+  Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
-                                                           Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op) {
   Output(BytecodeForCountOperation(op));
   return *this;
 }
@@ -260,14 +243,9 @@
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
-    Token::Value op, Register reg, Strength strength) {
-  if (is_strong(strength)) {
-    UNIMPLEMENTED();
-  }
-
-  Output(BytecodeForCompareOperation(op), reg.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
+                                                             Register reg) {
+  Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
   return *this;
 }
 
@@ -338,11 +316,10 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
     Register reg) {
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kLdar, reg.ToOperand());
+    Output(Bytecode::kLdar, reg.ToRawOperand());
   }
   return *this;
 }
@@ -350,15 +327,8 @@
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
     Register reg) {
-  // TODO(oth): Avoid storing the accumulator in the register if the
-  // previous bytecode loaded the accumulator with the same register.
-  //
-  // TODO(oth): If the previous bytecode is a MOV into this register,
-  // the previous instruction can be removed. The logic for determining
-  // these redundant MOVs appears complex.
-  Output(Bytecode::kStar, reg.ToOperand());
   if (!IsRegisterInAccumulator(reg)) {
-    Output(Bytecode::kStar, reg.ToOperand());
+    Output(Bytecode::kStar, reg.ToRawOperand());
   }
   return *this;
 }
@@ -367,31 +337,37 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
                                                          Register to) {
   DCHECK(from != to);
-  Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
-  return *this;
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
-                                                              Register reg1) {
-  DCHECK(reg0 != reg1);
-  if (FitsInReg8Operand(reg0)) {
-    Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
-  } else if (FitsInReg8Operand(reg1)) {
-    Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+  if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
+    Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
+  } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
+    Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
   } else {
-    Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+    UNIMPLEMENTED();
   }
   return *this;
 }
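
MoveRegister now selects between the byte and Wide encodings rather than
relying on the removed Exchange bytecodes. A minimal standalone sketch of
that narrow/wide dispatch pattern (plain C++, not V8 code; all names are
illustrative):

    #include <cstdio>

    // Stand-in for the 8-bit register operand range check.
    bool FitsInByteOperand(int index) { return index >= -128 && index <= 127; }

    void EmitMove(int from, int to) {
      if (FitsInByteOperand(from) && FitsInByteOperand(to)) {
        std::printf("Mov     r%d, r%d   ; 8-bit operands\n", from, to);
      } else {
        std::printf("MovWide r%d, r%d   ; 16-bit operands\n", from, to);
      }
    }

    int main() {
      EmitMove(3, 7);     // both fit in a byte operand
      EmitMove(3, 1000);  // falls back to the Wide encoding
    }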
 
+void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
+                                                    Register to) {
+  // Move bytecodes modify the stack. Checking validity is an
+  // essential mitigation against corrupting the stack.
+  if (FitsInReg8OperandUntranslated(from)) {
+    CHECK(RegisterIsValid(from, OperandType::kReg8) &&
+          RegisterIsValid(to, OperandType::kReg16));
+  } else if (FitsInReg8OperandUntranslated(to)) {
+    CHECK(RegisterIsValid(from, OperandType::kReg16) &&
+          RegisterIsValid(to, OperandType::kReg8));
+  } else {
+    UNIMPLEMENTED();
+  }
+  Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
-    const Handle<String> name, int feedback_slot, LanguageMode language_mode,
-    TypeofMode typeof_mode) {
-  // TODO(rmcilroy): Potentially store language and typeof information in an
+    const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+  // TODO(rmcilroy): Potentially store typeof information in an
   // operand rather than having extra bytecodes.
-  Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+  Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
     Output(bytecode, static_cast<uint8_t>(name_index),
@@ -429,10 +405,10 @@
                                                             int slot_index) {
   DCHECK(slot_index >= 0);
   if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+    Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
            static_cast<uint8_t>(slot_index));
   } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+    Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
            static_cast<uint16_t>(slot_index));
   } else {
     UNIMPLEMENTED();
@@ -445,10 +421,10 @@
                                                              int slot_index) {
   DCHECK(slot_index >= 0);
   if (FitsInIdx8Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlot, context.ToOperand(),
+    Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
            static_cast<uint8_t>(slot_index));
   } else if (FitsInIdx16Operand(slot_index)) {
-    Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+    Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
            static_cast<uint16_t>(slot_index));
   } else {
     UNIMPLEMENTED();
@@ -490,18 +466,16 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
-    Register object, const Handle<String> name, int feedback_slot,
-    LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForLoadIC(language_mode);
+    Register object, const Handle<Name> name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+    Output(Bytecode::kLoadIC, object.ToRawOperand(),
+           static_cast<uint8_t>(name_index),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(name_index) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(Bytecode::kLoadICWide, object.ToRawOperand(),
            static_cast<uint16_t>(name_index),
            static_cast<uint16_t>(feedback_slot));
   } else {
@@ -510,14 +484,13 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
-    Register object, int feedback_slot, LanguageMode language_mode) {
-  Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+    Register object, int feedback_slot) {
   if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+    Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
+           static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
            static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
@@ -525,18 +498,17 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
-    Register object, const Handle<String> name, int feedback_slot,
+    Register object, const Handle<Name> name, int feedback_slot,
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForStoreIC(language_mode);
   size_t name_index = GetConstantPoolEntry(name);
   if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+    Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(name_index) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
            static_cast<uint16_t>(name_index),
            static_cast<uint16_t>(feedback_slot));
   } else {
@@ -551,11 +523,11 @@
     LanguageMode language_mode) {
   Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
   if (FitsInIdx8Operand(feedback_slot)) {
-    Output(bytecode, object.ToOperand(), key.ToOperand(),
+    Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
            static_cast<uint8_t>(feedback_slot));
   } else if (FitsInIdx16Operand(feedback_slot)) {
-    Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
-           key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+    Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
+           key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
   }
@@ -653,13 +625,13 @@
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
-  Output(Bytecode::kPushContext, context.ToOperand());
+  Output(Bytecode::kPushContext, context.ToRawOperand());
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
-  Output(Bytecode::kPopContext, context.ToOperand());
+  Output(Bytecode::kPopContext, context.ToRawOperand());
   return *this;
 }
 
@@ -766,6 +738,8 @@
       return Bytecode::kJumpIfToBooleanTrueConstant;
     case Bytecode::kJumpIfToBooleanFalse:
       return Bytecode::kJumpIfToBooleanFalseConstant;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstant;
     case Bytecode::kJumpIfNull:
       return Bytecode::kJumpIfNullConstant;
     case Bytecode::kJumpIfUndefined:
@@ -791,6 +765,8 @@
       return Bytecode::kJumpIfToBooleanTrueConstantWide;
     case Bytecode::kJumpIfToBooleanFalse:
       return Bytecode::kJumpIfToBooleanFalseConstantWide;
+    case Bytecode::kJumpIfNotHole:
+      return Bytecode::kJumpIfNotHoleConstantWide;
     case Bytecode::kJumpIfNull:
       return Bytecode::kJumpIfNullConstantWide;
     case Bytecode::kJumpIfUndefined:
@@ -808,6 +784,7 @@
     case Bytecode::kJump:
     case Bytecode::kJumpIfNull:
     case Bytecode::kJumpIfUndefined:
+    case Bytecode::kJumpIfNotHole:
       return jump_bytecode;
     case Bytecode::kJumpIfTrue:
       return Bytecode::kJumpIfToBooleanTrue;
@@ -883,7 +860,10 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
                                                        BytecodeLabel* label) {
   // Don't emit dead code.
-  if (exit_seen_in_block_) return *this;
+  if (exit_seen_in_block_) {
+    source_position_table_builder_.RevertPosition(bytecodes()->size());
+    return *this;
+  }
 
   // Check if the value in accumulator is boolean, if not choose an
   // appropriate JumpIfToBoolean bytecode.
@@ -965,6 +945,15 @@
   return OutputJump(Bytecode::kJumpIfUndefined, label);
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+  Output(Bytecode::kStackCheck);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
+    BytecodeLabel* label) {
+  return OutputJump(Bytecode::kJumpIfNotHole, label);
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
   Output(Bytecode::kThrow);
@@ -973,40 +962,86 @@
 }
 
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
+  Output(Bytecode::kReThrow);
+  exit_seen_in_block_ = true;
+  return *this;
+}
+
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   Output(Bytecode::kReturn);
   exit_seen_in_block_ = true;
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
+  Output(Bytecode::kDebugger);
+  return *this;
+}
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
-    Register cache_type, Register cache_array, Register cache_length) {
-  Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
-         cache_array.ToOperand(), cache_length.ToOperand());
+    Register cache_info_triple) {
+  if (FitsInReg8Operand(cache_info_triple)) {
+    Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
+  } else if (FitsInReg16Operand(cache_info_triple)) {
+    Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
                                                       Register cache_length) {
-  Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+  Output(Bytecode::kForInDone, index.ToRawOperand(),
+         cache_length.ToRawOperand());
   return *this;
 }
 
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
-                                                      Register cache_type,
-                                                      Register cache_array,
-                                                      Register index) {
-  Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
-         cache_array.ToOperand(), index.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
+    Register receiver, Register index, Register cache_type_array_pair) {
+  if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
+      FitsInReg8Operand(cache_type_array_pair)) {
+    Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
+           cache_type_array_pair.ToRawOperand());
+  } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
+             FitsInReg16Operand(cache_type_array_pair)) {
+    Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
+           index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
-  Output(Bytecode::kForInStep, index.ToOperand());
+  Output(Bytecode::kForInStep, index.ToRawOperand());
+  return *this;
+}
+
+
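Hedged usage sketch of the reshaped for-in builders (V8-internal API from
this patch, so not compilable standalone; register indices are made up):
ForInPrepare now takes the first register of a consecutive triple, and
ForInNext takes the cache type/array pair that starts that triple.

    Register receiver(2);
    Register index(3);
    Register triple(4);  // r4 = cache type, r5 = cache array, r6 = cache length

    builder.ForInPrepare(triple);                // fills r4..r6
    builder.ForInDone(index, Register(6));       // index == cache length?
    builder.ForInNext(receiver, index, triple);  // pair starts at r4
    builder.ForInStep(index);                    // index += 1
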
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
+                                                        bool will_catch) {
+  handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
+  handler_table_builder()->SetPrediction(handler_id, will_catch);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
+                                                         Register context) {
+  handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+  handler_table_builder()->SetContextRegister(handler_id, context);
+  return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
+  handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
   return *this;
 }
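
Hedged sketch of how a generator might drive these new exception-handling
markers (V8-internal API; |context_reg| and the surrounding control flow
are illustrative assumptions, not lifted from this patch):

    int handler_id = builder.NewHandlerEntry();
    builder.MarkTryBegin(handler_id, context_reg);  // try region starts here
    // ... emit bytecode for the protected body ...
    builder.MarkTryEnd(handler_id);                 // try region ends here
    // ... emit a jump over the handler, then:
    builder.MarkHandler(handler_id, true);          // handler target; will catch
    // ... emit bytecode for the catch body ...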
 
@@ -1016,27 +1051,33 @@
   exit_seen_in_block_ = false;
 }
 
-
-void BytecodeArrayBuilder::EnsureReturn() {
+void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
   if (!exit_seen_in_block_) {
     LoadUndefined();
+    SetReturnPosition(literal);
     Return();
   }
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
-                                                 Register receiver,
-                                                 size_t arg_count,
-                                                 int feedback_slot) {
-  if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
-    Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
-           static_cast<uint8_t>(arg_count),
+                                                 Register receiver_args,
+                                                 size_t receiver_args_count,
+                                                 int feedback_slot,
+                                                 TailCallMode tail_call_mode) {
+  Bytecode bytecode = BytecodeForCall(tail_call_mode);
+  if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
+      FitsInIdx8Operand(receiver_args_count) &&
+      FitsInIdx8Operand(feedback_slot)) {
+    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+           static_cast<uint8_t>(receiver_args_count),
            static_cast<uint8_t>(feedback_slot));
-  } else if (FitsInIdx16Operand(arg_count) &&
+  } else if (FitsInReg16Operand(callable) &&
+             FitsInReg16Operand(receiver_args) &&
+             FitsInIdx16Operand(receiver_args_count) &&
              FitsInIdx16Operand(feedback_slot)) {
-    Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
-           static_cast<uint16_t>(arg_count),
+    bytecode = BytecodeForWideOperands(bytecode);
+    Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+           static_cast<uint16_t>(receiver_args_count),
            static_cast<uint16_t>(feedback_slot));
   } else {
     UNIMPLEMENTED();
@@ -1044,7 +1085,6 @@
   return *this;
 }
 
-
 BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
                                                 Register first_arg,
                                                 size_t arg_count) {
@@ -1052,9 +1092,17 @@
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  DCHECK(FitsInIdx8Operand(arg_count));
-  Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
-         static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
+      FitsInIdx8Operand(arg_count)) {
+    Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
+           static_cast<uint8_t>(arg_count));
+  } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
+             FitsInIdx16Operand(arg_count)) {
+    Output(Bytecode::kNewWide, constructor.ToRawOperand(),
+           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
@@ -1063,13 +1111,19 @@
     Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
   DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(FitsInIdx16Operand(function_id));
-  DCHECK(FitsInIdx8Operand(arg_count));
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
-         first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
+    Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
+  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
+    Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
@@ -1079,38 +1133,49 @@
     Register first_return) {
   DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
   DCHECK(FitsInIdx16Operand(function_id));
-  DCHECK(FitsInIdx8Operand(arg_count));
   if (!first_arg.is_valid()) {
     DCHECK_EQ(0u, arg_count);
     first_arg = Register(0);
   }
-  Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
-         first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
-         first_return.ToOperand());
+  if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
+      FitsInReg8Operand(first_return)) {
+    Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+           first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
+           first_return.ToRawOperand());
+  } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
+             FitsInReg16Operand(first_return)) {
+    Output(Bytecode::kCallRuntimeForPairWide,
+           static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
+           static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
-                                                          Register receiver,
-                                                          size_t arg_count) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
+    int context_index, Register receiver_args, size_t receiver_args_count) {
   DCHECK(FitsInIdx16Operand(context_index));
-  DCHECK(FitsInIdx8Operand(arg_count));
-  Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
-         receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+  if (FitsInReg8Operand(receiver_args) &&
+      FitsInIdx8Operand(receiver_args_count)) {
+    Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+           receiver_args.ToRawOperand(),
+           static_cast<uint8_t>(receiver_args_count));
+  } else if (FitsInReg16Operand(receiver_args) &&
+             FitsInIdx16Operand(receiver_args_count)) {
+    Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
+           receiver_args.ToRawOperand(),
+           static_cast<uint16_t>(receiver_args_count));
+  } else {
+    UNIMPLEMENTED();
+  }
   return *this;
 }
 
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
                                                    LanguageMode language_mode) {
-  Output(BytecodeForDelete(language_mode), object.ToOperand());
-  return *this;
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
-  Output(Bytecode::kDeleteLookupSlot);
+  Output(BytecodeForDelete(language_mode), object.ToRawOperand());
   return *this;
 }
 
@@ -1119,126 +1184,62 @@
   return constant_array_builder()->Insert(object);
 }
 
-
-int BytecodeArrayBuilder::BorrowTemporaryRegister() {
-  if (free_temporaries_.empty()) {
-    temporary_register_count_ += 1;
-    return last_temporary_register().index();
-  } else {
-    auto pos = free_temporaries_.begin();
-    int retval = *pos;
-    free_temporaries_.erase(pos);
-    return retval;
-  }
+void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
+  int pos = std::max(fun->start_position(), fun->end_position() - 1);
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
 }
 
-
-int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
-                                                            int end_index) {
-  auto index = free_temporaries_.lower_bound(start_index);
-  if (index == free_temporaries_.begin()) {
-    // If start_index is the first free register, check for a register
-    // greater than end_index.
-    index = free_temporaries_.upper_bound(end_index);
-    if (index == free_temporaries_.end()) {
-      temporary_register_count_ += 1;
-      return last_temporary_register().index();
-    }
-  } else {
-    // If there is a free register < start_index
-    index--;
-  }
-
-  int retval = *index;
-  free_temporaries_.erase(index);
-  return retval;
+void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
+  if (stmt->position() == RelocInfo::kNoPosition) return;
+  source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+                                                      stmt->position());
 }
 
-
-void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
-  free_temporaries_.erase(reg_index);
+void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
+  if (expr->position() == RelocInfo::kNoPosition) return;
+  source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
+                                                       expr->position());
 }
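
The three helpers above key positions to the current size of the bytecode
stream, so a recorded position applies to the next bytecode emitted. A
hedged sketch of the intended call pattern (the visitor shown is a
plausible shape, not lifted from this patch):

    void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
      builder()->SetStatementPosition(stmt);  // recorded at bytecodes_.size()
      VisitForEffect(stmt->expression());     // position attaches to this code
    }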
 
-
-void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
-  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
-  free_temporaries_.insert(reg_index);
-}
-
-
-int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
-    size_t count) {
-  if (count == 0) {
-    return -1;
-  }
-
-  // Search within existing temporaries for a run.
-  auto start = free_temporaries_.begin();
-  size_t run_length = 0;
-  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
-    if (*run_end != *start + static_cast<int>(run_length)) {
-      start = run_end;
-      run_length = 0;
-    }
-    if (++run_length == count) {
-      return *start;
-    }
-  }
-
-  // Continue run if possible across existing last temporary.
-  if (temporary_register_count_ > 0 &&
-      (start == free_temporaries_.end() ||
-       *start + static_cast<int>(run_length) !=
-           last_temporary_register().index() + 1)) {
-    run_length = 0;
-  }
-
-  // Ensure enough registers for run.
-  while (run_length++ < count) {
-    temporary_register_count_++;
-    free_temporaries_.insert(last_temporary_register().index());
-  }
-  return last_temporary_register().index() - static_cast<int>(count) + 1;
-}
-
-
 bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
-  if (temporary_register_count_ > 0) {
-    DCHECK(reg.index() >= first_temporary_register().index() &&
-           reg.index() <= last_temporary_register().index());
-    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
-  } else {
-    return false;
-  }
+  return temporary_register_allocator()->RegisterIsLive(reg);
 }
 
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
-  if (reg.is_function_context() || reg.is_function_closure() ||
-      reg.is_new_target()) {
-    return true;
-  } else if (reg.is_parameter()) {
-    int parameter_index = reg.ToParameterIndex(parameter_count_);
-    return parameter_index >= 0 && parameter_index < parameter_count_;
-  } else if (reg.index() < fixed_register_count()) {
-    return true;
-  } else {
-    return TemporaryRegisterIsLive(reg);
-  }
-}
-
-
 bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
                                           uint32_t operand_value) const {
   OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
   switch (operand_type) {
     case OperandType::kNone:
       return false;
-    case OperandType::kCount16:
+    case OperandType::kRegCount16: {
+      // kRegCount16 is expected to be part of a range: the previous
+      // operand must be a register operand that can start a range.
+      if (operand_index > 0) {
+        OperandType previous_operand_type =
+            Bytecodes::GetOperandType(bytecode, operand_index - 1);
+        return ((previous_operand_type == OperandType::kMaybeReg16 ||
+                 previous_operand_type == OperandType::kReg16) &&
+                static_cast<uint16_t>(operand_value) == operand_value);
+      } else {
+        return false;
+      }
+    }
+    case OperandType::kRegCount8: {
+      // kRegCount8 is expected to be part of a range: the previous
+      // operand must be a register operand that can start a range.
+      if (operand_index > 0) {
+        OperandType previous_operand_type =
+            Bytecodes::GetOperandType(bytecode, operand_index - 1);
+        return ((previous_operand_type == OperandType::kMaybeReg8 ||
+                 previous_operand_type == OperandType::kReg8 ||
+                 previous_operand_type == OperandType::kMaybeReg16) &&
+                static_cast<uint8_t>(operand_value) == operand_value);
+      } else {
+        return false;
+      }
+    }
     case OperandType::kIdx16:
       return static_cast<uint16_t>(operand_value) == operand_value;
-    case OperandType::kCount8:
     case OperandType::kImm8:
     case OperandType::kIdx8:
       return static_cast<uint8_t>(operand_value) == operand_value;
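
The two kRegCount cases above enforce an adjacency rule: a register-count
operand only makes sense immediately after the register operand that
begins the range it counts. A standalone sketch of that rule (plain C++,
not V8 code):

    #include <vector>

    enum class OpKind { kRangeBaseReg, kRegCount, kOther };

    // A count operand is well placed only when it directly follows a
    // register operand that can start a range.
    bool CountOperandsWellPlaced(const std::vector<OpKind>& operands) {
      for (size_t i = 0; i < operands.size(); ++i) {
        if (operands[i] == OpKind::kRegCount &&
            (i == 0 || operands[i - 1] != OpKind::kRangeBaseReg)) {
          return false;
        }
      }
      return true;
    }
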
@@ -1248,27 +1249,84 @@
       }
     // Fall-through to kReg8 case.
     case OperandType::kReg8:
-      return RegisterIsValid(
-          Register::FromOperand(static_cast<uint8_t>(operand_value)));
-    case OperandType::kRegPair8: {
-      Register reg0 =
-          Register::FromOperand(static_cast<uint8_t>(operand_value));
+    case OperandType::kRegOut8:
+      return RegisterIsValid(Register::FromRawOperand(operand_value),
+                             operand_type);
+    case OperandType::kRegOutPair8:
+    case OperandType::kRegOutPair16:
+    case OperandType::kRegPair8:
+    case OperandType::kRegPair16: {
+      Register reg0 = Register::FromRawOperand(operand_value);
       Register reg1 = Register(reg0.index() + 1);
-      return RegisterIsValid(reg0) && RegisterIsValid(reg1);
+      return RegisterIsValid(reg0, operand_type) &&
+             RegisterIsValid(reg1, operand_type);
     }
-    case OperandType::kReg16:
-      if (bytecode != Bytecode::kExchange &&
-          bytecode != Bytecode::kExchangeWide) {
-        return false;
+    case OperandType::kRegOutTriple8:
+    case OperandType::kRegOutTriple16: {
+      Register reg0 = Register::FromRawOperand(operand_value);
+      Register reg1 = Register(reg0.index() + 1);
+      Register reg2 = Register(reg0.index() + 2);
+      return RegisterIsValid(reg0, operand_type) &&
+             RegisterIsValid(reg1, operand_type) &&
+             RegisterIsValid(reg2, operand_type);
+    }
+    case OperandType::kMaybeReg16:
+      if (operand_value == 0) {
+        return true;
       }
-      return RegisterIsValid(
-          Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
+    // Fall-through to kReg16 case.
+    case OperandType::kReg16:
+    case OperandType::kRegOut16: {
+      Register reg = Register::FromRawOperand(operand_value);
+      return RegisterIsValid(reg, operand_type);
+    }
   }
   UNREACHABLE();
   return false;
 }
 
 
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
+                                           OperandType reg_type) const {
+  if (!reg.is_valid()) {
+    return false;
+  }
+
+  switch (Bytecodes::SizeOfOperand(reg_type)) {
+    case OperandSize::kByte:
+      if (!FitsInReg8OperandUntranslated(reg)) {
+        return false;
+      }
+      break;
+    case OperandSize::kShort:
+      if (!FitsInReg16OperandUntranslated(reg)) {
+        return false;
+      }
+      break;
+    case OperandSize::kNone:
+      UNREACHABLE();
+      return false;
+  }
+
+  if (reg.is_current_context() || reg.is_function_closure() ||
+      reg.is_new_target()) {
+    return true;
+  } else if (reg.is_parameter()) {
+    int parameter_index = reg.ToParameterIndex(parameter_count());
+    return parameter_index >= 0 && parameter_index < parameter_count();
+  } else if (RegisterTranslator::InTranslationWindow(reg)) {
+    return translation_register_count() > 0;
+  } else {
+    reg = RegisterTranslator::UntranslateRegister(reg);
+    if (reg.index() < fixed_register_count()) {
+      return true;
+    } else {
+      return TemporaryRegisterIsLive(reg);
+    }
+  }
+}
+
+
 bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
   return last_bytecode_start_ < bytecodes()->size() &&
          last_bytecode_start_ >= last_block_end_;
@@ -1279,9 +1337,10 @@
   if (LastBytecodeInSameBlock()) {
     PreviousBytecodeHelper previous_bytecode(*this);
     Bytecode bytecode = previous_bytecode.GetBytecode();
-    if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
-        (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
-      return true;
+    if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
+      Register previous_reg =
+          Register::FromOperand(previous_bytecode.GetOperand(0));
+      return previous_reg == reg;
     }
   }
   return false;
@@ -1367,14 +1426,14 @@
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
   switch (bytecode) {
-    case Bytecode::kLoadICSloppy:
-      return Bytecode::kLoadICSloppyWide;
-    case Bytecode::kLoadICStrict:
-      return Bytecode::kLoadICStrictWide;
-    case Bytecode::kKeyedLoadICSloppy:
-      return Bytecode::kKeyedLoadICSloppyWide;
-    case Bytecode::kKeyedLoadICStrict:
-      return Bytecode::kKeyedLoadICStrictWide;
+    case Bytecode::kCall:
+      return Bytecode::kCallWide;
+    case Bytecode::kTailCall:
+      return Bytecode::kTailCallWide;
+    case Bytecode::kLoadIC:
+      return Bytecode::kLoadICWide;
+    case Bytecode::kKeyedLoadIC:
+      return Bytecode::kKeyedLoadICWide;
     case Bytecode::kStoreICSloppy:
       return Bytecode::kStoreICSloppyWide;
     case Bytecode::kStoreICStrict:
@@ -1383,14 +1442,10 @@
       return Bytecode::kKeyedStoreICSloppyWide;
     case Bytecode::kKeyedStoreICStrict:
       return Bytecode::kKeyedStoreICStrictWide;
-    case Bytecode::kLdaGlobalSloppy:
-      return Bytecode::kLdaGlobalSloppyWide;
-    case Bytecode::kLdaGlobalStrict:
-      return Bytecode::kLdaGlobalStrictWide;
-    case Bytecode::kLdaGlobalInsideTypeofSloppy:
-      return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
-    case Bytecode::kLdaGlobalInsideTypeofStrict:
-      return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+    case Bytecode::kLdaGlobal:
+      return Bytecode::kLdaGlobalWide;
+    case Bytecode::kLdaGlobalInsideTypeof:
+      return Bytecode::kLdaGlobalInsideTypeofWide;
     case Bytecode::kStaGlobalSloppy:
       return Bytecode::kStaGlobalSloppyWide;
     case Bytecode::kStaGlobalStrict:
@@ -1411,39 +1466,6 @@
 
 
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kLoadICSloppy;
-    case STRICT:
-      return Bytecode::kLoadICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
-    LanguageMode language_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return Bytecode::kKeyedLoadICSloppy;
-    case STRICT:
-      return Bytecode::kKeyedLoadICStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
-}
-
-
-// static
 Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
   switch (language_mode) {
     case SLOPPY:
@@ -1477,23 +1499,9 @@
 
 
 // static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
-                                                     TypeofMode typeof_mode) {
-  switch (language_mode) {
-    case SLOPPY:
-      return typeof_mode == INSIDE_TYPEOF
-                 ? Bytecode::kLdaGlobalInsideTypeofSloppy
-                 : Bytecode::kLdaGlobalSloppy;
-    case STRICT:
-      return typeof_mode == INSIDE_TYPEOF
-                 ? Bytecode::kLdaGlobalInsideTypeofStrict
-                 : Bytecode::kLdaGlobalStrict;
-    case STRONG:
-      UNIMPLEMENTED();
-    default:
-      UNREACHABLE();
-  }
-  return static_cast<Bytecode>(-1);
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
+  return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
+                                      : Bytecode::kLdaGlobal;
 }
 
 
@@ -1530,7 +1538,6 @@
   return static_cast<Bytecode>(-1);
 }
 
-
 // static
 Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
     CreateArgumentsType type) {
@@ -1539,9 +1546,10 @@
       return Bytecode::kCreateMappedArguments;
     case CreateArgumentsType::kUnmappedArguments:
       return Bytecode::kCreateUnmappedArguments;
-    default:
-      UNREACHABLE();
+    case CreateArgumentsType::kRestParameter:
+      return Bytecode::kCreateRestParameter;
   }
+  UNREACHABLE();
   return static_cast<Bytecode>(-1);
 }
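
Replacing the default: clause with an exhaustive switch plus a trailing
UNREACHABLE() is a deliberate pattern: with no default, the compiler's
switch-coverage warning flags any newly added CreateArgumentsType value.
A standalone sketch (plain C++, not V8 code):

    #include <cstdio>
    #include <cstdlib>

    enum class ArgsType { kMapped, kUnmapped, kRest };

    int CodeFor(ArgsType type) {
      switch (type) {  // no default: -Wswitch warns if a case is missing
        case ArgsType::kMapped:   return 1;
        case ArgsType::kUnmapped: return 2;
        case ArgsType::kRest:     return 3;
      }
      std::abort();  // mirrors UNREACHABLE(): every case returned above
    }

    int main() { std::printf("%d\n", CodeFor(ArgsType::kRest)); }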
 
@@ -1561,6 +1569,18 @@
   return static_cast<Bytecode>(-1);
 }
 
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
+  switch (tail_call_mode) {
+    case TailCallMode::kDisallow:
+      return Bytecode::kCall;
+    case TailCallMode::kAllow:
+      return Bytecode::kTailCall;
+    default:
+      UNREACHABLE();
+  }
+  return static_cast<Bytecode>(-1);
+}
 
 // static
 bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
@@ -1594,13 +1614,23 @@
 
 // static
 bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
-  return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
+  return RegisterTranslator::FitsInReg8Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
+  return value.is_byte_operand();
 }
 
 
 // static
 bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
-  return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
+  return RegisterTranslator::FitsInReg16Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
+  return value.is_short_operand();
 }
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 7c23dc3..fe69337 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -6,8 +6,12 @@
 #define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 
 #include "src/ast/ast.h"
+#include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
+#include "src/interpreter/handler-table-builder.h"
+#include "src/interpreter/register-translator.h"
+#include "src/interpreter/source-position-table.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -18,36 +22,29 @@
 namespace interpreter {
 
 class BytecodeLabel;
-class ConstantArrayBuilder;
 class Register;
 
-// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
-// when rest parameters implementation has settled down.
-enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
-
-class BytecodeArrayBuilder final {
+class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
  public:
-  BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+  BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
+                       int context_count, int locals_count);
   ~BytecodeArrayBuilder();
 
   Handle<BytecodeArray> ToBytecodeArray();
 
-  // Set the number of parameters expected by function.
-  void set_parameter_count(int number_of_params);
+  // Get the number of parameters expected by function.
   int parameter_count() const {
     DCHECK_GE(parameter_count_, 0);
     return parameter_count_;
   }
 
-  // Set the number of locals required for bytecode array.
-  void set_locals_count(int number_of_locals);
+  // Get the number of locals required for bytecode array.
   int locals_count() const {
     DCHECK_GE(local_register_count_, 0);
     return local_register_count_;
   }
 
-  // Set number of contexts required for bytecode array.
-  void set_context_count(int number_of_contexts);
+  // Get number of contexts required for bytecode array.
   int context_count() const {
     DCHECK_GE(context_register_count_, 0);
     return context_register_count_;
@@ -59,14 +56,30 @@
   // Returns the number of fixed (non-temporary) registers.
   int fixed_register_count() const { return context_count() + locals_count(); }
 
+  // Returns the number of fixed and temporary registers.
+  int fixed_and_temporary_register_count() const {
+    return fixed_register_count() + temporary_register_count();
+  }
+
+  int temporary_register_count() const {
+    return temporary_register_allocator()->allocation_count();
+  }
+
+  // Returns the number of registers used for translating wide
+  // register operands into byte-sized register operands.
+  int translation_register_count() const {
+    return RegisterTranslator::RegisterCountAdjustment(
+        fixed_and_temporary_register_count(), parameter_count());
+  }
+
   Register Parameter(int parameter_index) const;
 
   // Return true if the register |reg| represents a parameter or a
   // local.
   bool RegisterIsParameterOrLocal(Register reg) const;
 
-  // Return true if the register |reg| represents a temporary register.
-  bool RegisterIsTemporary(Register reg) const;
+  // Returns true if the register |reg| is a live temporary register.
+  bool TemporaryRegisterIsLive(Register reg) const;
 
   // Constant loads to accumulator.
   BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -80,7 +93,6 @@
 
   // Global loads to the accumulator and stores from the accumulator.
   BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
-                                   LanguageMode language_mode,
                                    TypeofMode typeof_mode);
   BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
                                     int feedback_slot,
@@ -98,20 +110,17 @@
 
   // Register-register transfer.
   BytecodeArrayBuilder& MoveRegister(Register from, Register to);
-  BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
 
   // Named load property.
   BytecodeArrayBuilder& LoadNamedProperty(Register object,
-                                          const Handle<String> name,
-                                          int feedback_slot,
-                                          LanguageMode language_mode);
+                                          const Handle<Name> name,
+                                          int feedback_slot);
   // Keyed load property. The key should be in the accumulator.
-  BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
-                                          LanguageMode language_mode);
+  BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
 
   // Store properties. The value to be stored should be in the accumulator.
   BytecodeArrayBuilder& StoreNamedProperty(Register object,
-                                           const Handle<String> name,
+                                           const Handle<Name> name,
                                            int feedback_slot,
                                            LanguageMode language_mode);
   BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
@@ -149,44 +158,51 @@
   BytecodeArrayBuilder& PopContext(Register context);
 
   // Call a JS function. The JSFunction or Callable to be called should be in
-  // |callable|, the receiver should be in |receiver| and all subsequent
-  // arguments should be in registers <receiver + 1> to
-  // <receiver + 1 + arg_count>.
-  BytecodeArrayBuilder& Call(Register callable, Register receiver,
-                             size_t arg_count, int feedback_slot);
+  // |callable|, the receiver should be in |receiver_args| and all subsequent
+  // arguments should be in registers <receiver_args + 1> to
+  // <receiver_args + receiver_arg_count - 1>.
+  BytecodeArrayBuilder& Call(
+      Register callable, Register receiver_args, size_t receiver_arg_count,
+      int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
-  // Call the new operator. The |constructor| register is followed by
-  // |arg_count| consecutive registers containing arguments to be
-  // applied to the constructor.
+  BytecodeArrayBuilder& TailCall(Register callable, Register receiver_args,
+                                 size_t receiver_arg_count, int feedback_slot) {
+    return Call(callable, receiver_args, receiver_arg_count, feedback_slot,
+                TailCallMode::kAllow);
+  }
+
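Hedged usage sketch of the receiver_args layout described above
(V8-internal API; the registers and feedback slot are illustrative). The
count includes the receiver, so a receiver plus two arguments means a
count of 3 across consecutive registers:

    Register callable(2);
    Register receiver_args(3);  // r3 = receiver, r4 and r5 = two arguments

    builder.Call(callable, receiver_args, 3, feedback_slot);      // normal call
    builder.TailCall(callable, receiver_args, 3, feedback_slot);  // tail position
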
+  // Call the new operator. The accumulator holds the |new_target|.
+  // The |constructor| is in a register followed by |arg_count|
+  // consecutive arguments starting at |first_arg| for the constructor
+  // invocation.
   BytecodeArrayBuilder& New(Register constructor, Register first_arg,
                             size_t arg_count);
 
   // Call the runtime function with |function_id|. The first argument should be
   // in |first_arg| and all subsequent arguments should be in registers
-  // <first_arg + 1> to <first_arg + 1 + arg_count>.
+  // <first_arg + 1> to <first_arg + arg_count - 1>.
   BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
                                     Register first_arg, size_t arg_count);
 
   // Call the runtime function with |function_id| that returns a pair of values.
   // The first argument should be in |first_arg| and all subsequent arguments
-  // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+  // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
   // return values will be returned in <first_return> and <first_return + 1>.
   BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
                                            Register first_arg, size_t arg_count,
                                            Register first_return);
 
   // Call the JS runtime function with |context_index|. The receiver should
-  // be in |receiver| and all subsequent arguments should be in registers
-  // <receiver + 1> to <receiver + 1 + arg_count>.
-  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
-                                      size_t arg_count);
+  // be in |receiver_args| and all subsequent arguments should be in registers
+  // <receiver_args + 1> to <receiver_args + receiver_args_count - 1>.
+  BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver_args,
+                                      size_t receiver_args_count);
 
   // Operators (register holds the lhs value, accumulator holds the rhs value).
-  BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
-                                        Strength strength);
+  BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
 
   // Count Operators (value stored in accumulator).
-  BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+  BytecodeArrayBuilder& CountOperation(Token::Value op);
 
   // Unary Operators.
   BytecodeArrayBuilder& LogicalNot();
@@ -195,11 +211,9 @@
   // Deletes property from an object. This expects that accumulator contains
   // the key to be deleted and the register contains a reference to the object.
   BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
-  BytecodeArrayBuilder& DeleteLookupSlot();
 
   // Tests.
-  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
-                                         Strength strength);
+  BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
 
   // Casts.
   BytecodeArrayBuilder& CastAccumulatorToBoolean();
@@ -214,48 +228,65 @@
   BytecodeArrayBuilder& Jump(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
 
+  BytecodeArrayBuilder& StackCheck();
+
   BytecodeArrayBuilder& Throw();
+  BytecodeArrayBuilder& ReThrow();
   BytecodeArrayBuilder& Return();
 
+  // Debugger.
+  BytecodeArrayBuilder& Debugger();
+
   // Complex flow control.
-  BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
-                                     Register cache_length);
+  BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
   BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
-  BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
-                                  Register cache_array, Register index);
+  BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
+                                  Register cache_type_array_pair);
   BytecodeArrayBuilder& ForInStep(Register index);
 
+  // Exception handling.
+  BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
+  BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
+  BytecodeArrayBuilder& MarkTryEnd(int handler_id);
+
+  // Creates a new handler table entry and returns a {handler_id} identifying
+  // the entry, so that it can be referenced by the exception handling
+  // support above.
+  int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+
+  void SetStatementPosition(Statement* stmt);
+  void SetExpressionPosition(Expression* expr);
+
   // Accessors
   Zone* zone() const { return zone_; }
+  TemporaryRegisterAllocator* temporary_register_allocator() {
+    return &temporary_allocator_;
+  }
+  const TemporaryRegisterAllocator* temporary_register_allocator() const {
+    return &temporary_allocator_;
+  }
+
+  void EnsureReturn(FunctionLiteral* literal);
 
  private:
-  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
-  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
-  Isolate* isolate() const { return isolate_; }
-  ConstantArrayBuilder* constant_array_builder() {
-    return &constant_array_builder_;
-  }
-  const ConstantArrayBuilder* constant_array_builder() const {
-    return &constant_array_builder_;
-  }
+  class PreviousBytecodeHelper;
+  friend class BytecodeRegisterAllocator;
 
   static Bytecode BytecodeForBinaryOperation(Token::Value op);
   static Bytecode BytecodeForCountOperation(Token::Value op);
   static Bytecode BytecodeForCompareOperation(Token::Value op);
   static Bytecode BytecodeForWideOperands(Bytecode bytecode);
-  static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
-  static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
   static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
   static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
-  static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
-                                        TypeofMode typeof_mode);
+  static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
   static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
   static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
   static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
   static Bytecode BytecodeForDelete(LanguageMode language_mode);
+  static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
 
   static bool FitsInIdx8Operand(int value);
   static bool FitsInIdx8Operand(size_t value);
@@ -263,15 +294,17 @@
   static bool FitsInIdx16Operand(int value);
   static bool FitsInIdx16Operand(size_t value);
   static bool FitsInReg8Operand(Register value);
+  static bool FitsInReg8OperandUntranslated(Register value);
   static bool FitsInReg16Operand(Register value);
+  static bool FitsInReg16OperandUntranslated(Register value);
+
+  // RegisterMover interface.
+  void MoveRegisterUntranslated(Register from, Register to) override;
 
   static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
   static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
 
-  Register MapRegister(Register reg);
-  Register MapRegisters(Register reg, Register args_base, int args_length = 1);
-
   template <size_t N>
   INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
   void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
@@ -292,49 +325,54 @@
       const ZoneVector<uint8_t>::iterator& jump_location, int delta);
 
   void LeaveBasicBlock();
-  void EnsureReturn();
 
   bool OperandIsValid(Bytecode bytecode, int operand_index,
                       uint32_t operand_value) const;
-  bool LastBytecodeInSameBlock() const;
+  bool RegisterIsValid(Register reg, OperandType reg_type) const;
 
+  bool LastBytecodeInSameBlock() const;
   bool NeedToBooleanCast();
   bool IsRegisterInAccumulator(Register reg);
 
-  bool RegisterIsValid(Register reg) const;
-
-  // Temporary register management.
-  int BorrowTemporaryRegister();
-  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
-  void ReturnTemporaryRegister(int reg_index);
-  int PrepareForConsecutiveTemporaryRegisters(size_t count);
-  void BorrowConsecutiveTemporaryRegister(int reg_index);
-  bool TemporaryRegisterIsLive(Register reg) const;
-
-  Register first_temporary_register() const;
-  Register last_temporary_register() const;
+  // Set position for implicit return.
+  void SetReturnPosition(FunctionLiteral* fun);
 
   // Gets a constant pool entry for the |object|.
   size_t GetConstantPoolEntry(Handle<Object> object);
 
+  ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+  const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+  Isolate* isolate() const { return isolate_; }
+  ConstantArrayBuilder* constant_array_builder() {
+    return &constant_array_builder_;
+  }
+  const ConstantArrayBuilder* constant_array_builder() const {
+    return &constant_array_builder_;
+  }
+  HandlerTableBuilder* handler_table_builder() {
+    return &handler_table_builder_;
+  }
+  SourcePositionTableBuilder* source_position_table_builder() {
+    return &source_position_table_builder_;
+  }
+  RegisterTranslator* register_translator() { return &register_translator_; }
+
   Isolate* isolate_;
   Zone* zone_;
   ZoneVector<uint8_t> bytecodes_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
+  HandlerTableBuilder handler_table_builder_;
+  SourcePositionTableBuilder source_position_table_builder_;
   size_t last_block_end_;
   size_t last_bytecode_start_;
   bool exit_seen_in_block_;
   int unbound_jumps_;
-
   int parameter_count_;
   int local_register_count_;
   int context_register_count_;
-  int temporary_register_count_;
-  ZoneSet<int> free_temporaries_;
-
-  class PreviousBytecodeHelper;
-  friend class BytecodeRegisterAllocator;
+  TemporaryRegisterAllocator temporary_allocator_;
+  RegisterTranslator register_translator_;
 
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
 };
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index d09d72f..0fea985 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -47,14 +47,14 @@
       bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
       Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
   switch (Bytecodes::SizeOfOperand(operand_type)) {
-    default:
-    case OperandSize::kNone:
-      UNREACHABLE();
     case OperandSize::kByte:
       return static_cast<uint32_t>(*operand_start);
     case OperandSize::kShort:
       return ReadUnalignedUInt16(operand_start);
+    case OperandSize::kNone:
+      UNREACHABLE();
   }
+  return 0;
 }
 
 
@@ -63,12 +63,11 @@
   return static_cast<int8_t>(operand);
 }
 
-
-int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
   OperandSize size =
       Bytecodes::GetOperandSize(current_bytecode(), operand_index);
-  OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
-                                                  : OperandType::kCount16;
+  OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
+                                                  : OperandType::kRegCount16;
   uint32_t operand = GetRawOperand(operand_index, type);
   return static_cast<int>(operand);
 }
@@ -87,19 +86,63 @@
 Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
   OperandType operand_type =
       Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kReg8 ||
-         operand_type == OperandType::kRegPair8 ||
-         operand_type == OperandType::kMaybeReg8 ||
-         operand_type == OperandType::kReg16);
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
   uint32_t operand = GetRawOperand(operand_index, operand_type);
-  return Register::FromOperand(operand);
+  Register reg;
+  switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
+    case OperandSize::kByte:
+      reg = Register::FromOperand(static_cast<uint8_t>(operand));
+      break;
+    case OperandSize::kShort:
+      reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
+      break;
+    case OperandSize::kNone:
+      UNREACHABLE();
+      reg = Register::invalid_value();
+      break;
+  }
+  DCHECK_GE(reg.index(),
+            Register::FromParameterIndex(0, bytecode_array()->parameter_count())
+                .index());
+  DCHECK(reg.index() < bytecode_array()->register_count() ||
+         (reg.index() == 0 &&
+          Bytecodes::IsMaybeRegisterOperandType(
+              Bytecodes::GetOperandType(current_bytecode(), operand_index))));
+  return reg;
 }
 
+int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
+  interpreter::OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  switch (operand_type) {
+    case OperandType::kRegPair8:
+    case OperandType::kRegPair16:
+    case OperandType::kRegOutPair8:
+    case OperandType::kRegOutPair16:
+      return 2;
+    case OperandType::kRegOutTriple8:
+    case OperandType::kRegOutTriple16:
+      return 3;
+    default: {
+      if (operand_index + 1 !=
+          Bytecodes::NumberOfOperands(current_bytecode())) {
+        OperandType next_operand_type =
+            Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
+        if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+          return GetRegisterCountOperand(operand_index + 1);
+        }
+      }
+      return 1;
+    }
+  }
+}
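
Hedged sketch of how a consumer might combine the two accessors above
(V8-internal API; illustrative): every register use decodes to a base
register plus a range length of 1, 2, 3, or a following count operand.

    for (int i = 0; i < Bytecodes::NumberOfOperands(it.current_bytecode()); i++) {
      OperandType type = Bytecodes::GetOperandType(it.current_bytecode(), i);
      if (!Bytecodes::IsRegisterOperandType(type)) continue;
      Register base = it.GetRegisterOperand(i);
      int count = it.GetRegisterOperandRange(i);
      // Registers base.index() .. base.index() + count - 1 are accessed.
    }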
 
 Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
     int operand_index) const {
-  Handle<FixedArray> constants = handle(bytecode_array()->constant_pool());
-  return FixedArray::get(constants, GetIndexOperand(operand_index));
+  return FixedArray::get(bytecode_array()->constant_pool(),
+                         GetIndexOperand(operand_index),
+                         bytecode_array()->GetIsolate());
 }
 
 
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index e67fa97..5379bbf 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -21,6 +21,7 @@
   bool done() const;
   Bytecode current_bytecode() const;
   int current_bytecode_size() const;
+  void set_current_offset(int offset) { bytecode_offset_ = offset; }
   int current_offset() const { return bytecode_offset_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
@@ -28,8 +29,9 @@
 
   int8_t GetImmediateOperand(int operand_index) const;
   int GetIndexOperand(int operand_index) const;
-  int GetCountOperand(int operand_index) const;
+  int GetRegisterCountOperand(int operand_index) const;
   Register GetRegisterOperand(int operand_index) const;
+  int GetRegisterOperandRange(int operand_index) const;
   Handle<Object> GetConstantForIndexOperand(int operand_index) const;
 
   // Get the raw byte for the given operand. Note: you should prefer using the
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 959e155..6f4dc27 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -27,19 +27,26 @@
       : generator_(generator),
         scope_(scope),
         outer_(generator_->execution_context()),
-        register_(generator_->NextContextRegister()),
+        register_(Register::current_context()),
         depth_(0),
         should_pop_context_(should_pop_context) {
     if (outer_) {
       depth_ = outer_->depth_ + 1;
-      generator_->builder()->PushContext(register_);
+
+      // Push the outer context into a new context register.
+      Register outer_context_reg(builder()->first_context_register().index() +
+                                 outer_->depth_);
+      outer_->set_register(outer_context_reg);
+      generator_->builder()->PushContext(outer_context_reg);
     }
     generator_->set_execution_context(this);
   }
 
   ~ContextScope() {
     if (outer_ && should_pop_context_) {
+      DCHECK_EQ(register_.index(), Register::current_context().index());
       generator_->builder()->PopContext(outer_->reg());
+      outer_->set_register(register_);
     }
     generator_->set_execution_context(outer_);
   }
@@ -67,6 +74,10 @@
   Register reg() const { return register_; }
 
  private:
+  const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+  void set_register(Register reg) { register_ = reg; }
+
   BytecodeGenerator* generator_;
   Scope* scope_;
   ContextScope* outer_;
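A minimal model (plain C++, simplified names) of the context-register discipline the ContextScope changes above introduce: the current context now occupies one fixed register, and entering a nested scope spills the outer context to first_context_register() + depth, restoring it on exit.

    #include <vector>

    struct ContextRegisterModel {
      int current_context = 0;       // models Register::current_context()
      std::vector<int> spill_slots;  // models first_context_register() + depth

      void PushContext(int new_context) {
        spill_slots.push_back(current_context);  // spill outer by depth
        current_context = new_context;
      }
      void PopContext() {
        current_context = spill_slots.back();    // restore on scope exit
        spill_slots.pop_back();
      }
    };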
@@ -81,30 +92,141 @@
 class BytecodeGenerator::ControlScope BASE_EMBEDDED {
  public:
   explicit ControlScope(BytecodeGenerator* generator)
-      : generator_(generator), outer_(generator->execution_control()) {
+      : generator_(generator), outer_(generator->execution_control()),
+        context_(generator->execution_context()) {
     generator_->set_execution_control(this);
   }
   virtual ~ControlScope() { generator_->set_execution_control(outer()); }
 
   void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
   void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+  void ReturnAccumulator() { PerformCommand(CMD_RETURN, nullptr); }
+  void ReThrowAccumulator() { PerformCommand(CMD_RETHROW, nullptr); }
+
+  class DeferredCommands;
 
  protected:
-  enum Command { CMD_BREAK, CMD_CONTINUE };
+  enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_RETHROW };
   void PerformCommand(Command command, Statement* statement);
   virtual bool Execute(Command command, Statement* statement) = 0;
 
   BytecodeGenerator* generator() const { return generator_; }
   ControlScope* outer() const { return outer_; }
+  ContextScope* context() const { return context_; }
 
  private:
   BytecodeGenerator* generator_;
   ControlScope* outer_;
+  ContextScope* context_;
 
   DISALLOW_COPY_AND_ASSIGN(ControlScope);
 };
 
 
+// Helper class for a try-finally control scope. It can record intercepted
+// control-flow commands that cause entry into a finally-block, and re-apply
+// them after leaving that block again. Special tokens are used to identify
+// paths going through the finally-block to dispatch after leaving the block.
+class BytecodeGenerator::ControlScope::DeferredCommands final {
+ public:
+  DeferredCommands(BytecodeGenerator* generator, Register token_register,
+                   Register result_register)
+      : generator_(generator),
+        deferred_(generator->zone()),
+        token_register_(token_register),
+        result_register_(result_register) {}
+
+  // One recorded control-flow command.
+  struct Entry {
+    Command command;       // The command type being applied on this path.
+    Statement* statement;  // The target statement for the command or {nullptr}.
+    int token;             // A token identifying this particular path.
+  };
+
+  // Records a control-flow command while entering the finally-block. This also
+  // generates a new dispatch token that identifies one particular path. This
+  // expects the result to be in the accumulator.
+  void RecordCommand(Command command, Statement* statement) {
+    int token = static_cast<int>(deferred_.size());
+    deferred_.push_back({command, statement, token});
+
+    builder()->StoreAccumulatorInRegister(result_register_);
+    builder()->LoadLiteral(Smi::FromInt(token));
+    builder()->StoreAccumulatorInRegister(token_register_);
+  }
+
+  // Records the dispatch token to be used to identify the re-throw path when
+  // the finally-block has been entered through the exception handler. This
+  // expects the exception to be in the accumulator.
+  void RecordHandlerReThrowPath() {
+    // The accumulator contains the exception object.
+    RecordCommand(CMD_RETHROW, nullptr);
+  }
+
+  // Records the dispatch token to be used to identify the implicit fall-through
+  // path at the end of a try-block into the corresponding finally-block.
+  void RecordFallThroughPath() {
+    builder()->LoadLiteral(Smi::FromInt(-1));
+    builder()->StoreAccumulatorInRegister(token_register_);
+  }
+
+  // Applies all recorded control-flow commands after the finally-block again.
+  // This generates a dynamic dispatch on the token from the entry point.
+  void ApplyDeferredCommands() {
+    // The fall-through path is covered by the default case, hence +1 here.
+    SwitchBuilder dispatch(builder(), static_cast<int>(deferred_.size() + 1));
+    for (size_t i = 0; i < deferred_.size(); ++i) {
+      Entry& entry = deferred_[i];
+      builder()->LoadLiteral(Smi::FromInt(entry.token));
+      builder()->CompareOperation(Token::EQ_STRICT, token_register_);
+      dispatch.Case(static_cast<int>(i));
+    }
+    dispatch.DefaultAt(static_cast<int>(deferred_.size()));
+    for (size_t i = 0; i < deferred_.size(); ++i) {
+      Entry& entry = deferred_[i];
+      dispatch.SetCaseTarget(static_cast<int>(i));
+      builder()->LoadAccumulatorWithRegister(result_register_);
+      execution_control()->PerformCommand(entry.command, entry.statement);
+    }
+    dispatch.SetCaseTarget(static_cast<int>(deferred_.size()));
+  }
+
+  BytecodeArrayBuilder* builder() { return generator_->builder(); }
+  ControlScope* execution_control() { return generator_->execution_control(); }
+
+ private:
+  BytecodeGenerator* generator_;
+  ZoneVector<Entry> deferred_;
+  Register token_register_;
+  Register result_register_;
+};
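The record/dispatch protocol in isolation, as a runnable model with no V8 types (all names here are illustrative):

    #include <functional>
    #include <vector>

    struct DeferredEntry {
      int token;                      // identifies one path into the finally-block
      std::function<void()> reapply;  // re-issues the intercepted command
    };

    // Token -1 is the fall-through path: it matches no entry, so control just
    // continues, like the default case of the SwitchBuilder dispatch above.
    void ApplyDeferredCommands(const std::vector<DeferredEntry>& deferred,
                               int token) {
      for (const DeferredEntry& entry : deferred) {
        if (entry.token == token) {
          entry.reapply();
          return;
        }
      }
    }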
+
+
+// Scoped class for dealing with control flow reaching the function level.
+class BytecodeGenerator::ControlScopeForTopLevel final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  explicit ControlScopeForTopLevel(BytecodeGenerator* generator)
+      : ControlScope(generator) {}
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+        break;
+      case CMD_RETURN:
+        generator()->builder()->Return();
+        return true;
+      case CMD_RETHROW:
+        generator()->builder()->ReThrow();
+        return true;
+    }
+    return false;
+  }
+};
+
+
 // Scoped class for enabling break inside blocks and switch blocks.
 class BytecodeGenerator::ControlScopeForBreakable final
     : public BytecodeGenerator::ControlScope {
@@ -117,13 +239,15 @@
         control_builder_(control_builder) {}
 
  protected:
-  virtual bool Execute(Command command, Statement* statement) {
+  bool Execute(Command command, Statement* statement) override {
     if (statement != statement_) return false;
     switch (command) {
       case CMD_BREAK:
         control_builder_->Break();
         return true;
       case CMD_CONTINUE:
+      case CMD_RETURN:
+      case CMD_RETHROW:
         break;
     }
     return false;
@@ -148,7 +272,7 @@
         loop_builder_(loop_builder) {}
 
  protected:
-  virtual bool Execute(Command command, Statement* statement) {
+  bool Execute(Command command, Statement* statement) override {
     if (statement != statement_) return false;
     switch (command) {
       case CMD_BREAK:
@@ -157,6 +281,9 @@
       case CMD_CONTINUE:
         loop_builder_->Continue();
         return true;
+      case CMD_RETURN:
+      case CMD_RETHROW:
+        break;
     }
     return false;
   }
@@ -167,12 +294,84 @@
 };
 
 
+// Scoped class for enabling 'throw' in try-catch constructs.
+class BytecodeGenerator::ControlScopeForTryCatch final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForTryCatch(BytecodeGenerator* generator,
+                          TryCatchBuilder* try_catch_builder)
+      : ControlScope(generator) {
+    generator->try_catch_nesting_level_++;
+  }
+  virtual ~ControlScopeForTryCatch() {
+    generator()->try_catch_nesting_level_--;
+  }
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+      case CMD_RETURN:
+        break;
+      case CMD_RETHROW:
+        generator()->builder()->ReThrow();
+        return true;
+    }
+    return false;
+  }
+};
+
+
+// Scoped class for enabling control flow through try-finally constructs.
+class BytecodeGenerator::ControlScopeForTryFinally final
+    : public BytecodeGenerator::ControlScope {
+ public:
+  ControlScopeForTryFinally(BytecodeGenerator* generator,
+                            TryFinallyBuilder* try_finally_builder,
+                            DeferredCommands* commands)
+      : ControlScope(generator),
+        try_finally_builder_(try_finally_builder),
+        commands_(commands) {
+    generator->try_finally_nesting_level_++;
+  }
+  virtual ~ControlScopeForTryFinally() {
+    generator()->try_finally_nesting_level_--;
+  }
+
+ protected:
+  bool Execute(Command command, Statement* statement) override {
+    switch (command) {
+      case CMD_BREAK:
+      case CMD_CONTINUE:
+      case CMD_RETURN:
+      case CMD_RETHROW:
+        commands_->RecordCommand(command, statement);
+        try_finally_builder_->LeaveTry();
+        return true;
+    }
+    return false;
+  }
+
+ private:
+  TryFinallyBuilder* try_finally_builder_;
+  DeferredCommands* commands_;
+};
+
+
 void BytecodeGenerator::ControlScope::PerformCommand(Command command,
                                                      Statement* statement) {
   ControlScope* current = this;
+  ContextScope* context = this->context();
   do {
-    if (current->Execute(command, statement)) return;
+    if (current->Execute(command, statement)) { return; }
     current = current->outer();
+    if (current->context() != context) {
+      // Pop context to the expected depth.
+      // TODO(rmcilroy): Only emit a single context pop.
+      generator()->builder()->PopContext(current->context()->reg());
+      context = current->context();
+    }
   } while (current != nullptr);
   UNREACHABLE();
 }
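PerformCommand() is a chain-of-responsibility walk over the control scopes; in miniature (hypothetical names, context popping elided):

    struct ScopeModel {
      ScopeModel* outer = nullptr;
      virtual ~ScopeModel() = default;
      virtual bool Execute(int command) = 0;  // true if this scope consumed it

      void Perform(int command) {
        for (ScopeModel* scope = this; scope != nullptr; scope = scope->outer) {
          if (scope->Execute(command)) return;  // innermost handler wins
        }
        // Mirrors UNREACHABLE(): some scope (e.g. the top-level one, which
        // consumes CMD_RETURN/CMD_RETHROW) must handle every command.
      }
    };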
@@ -183,7 +382,8 @@
   explicit RegisterAllocationScope(BytecodeGenerator* generator)
       : generator_(generator),
         outer_(generator->register_allocator()),
-        allocator_(builder()) {
+        allocator_(builder()->zone(),
+                   builder()->temporary_register_allocator()) {
     generator_->set_register_allocator(this);
   }
 
@@ -205,11 +405,11 @@
       // walk the full context chain and compute the list of consecutive
       // reservations in the innerscopes.
       UNIMPLEMENTED();
-      return Register(-1);
+      return Register::invalid_value();
     }
   }
 
-  void PrepareForConsecutiveAllocations(size_t count) {
+  void PrepareForConsecutiveAllocations(int count) {
     allocator_.PrepareForConsecutiveAllocations(count);
   }
 
@@ -330,7 +530,7 @@
 
   virtual void SetResultInRegister(Register reg) {
     DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
-           (builder()->RegisterIsTemporary(reg) &&
+           (builder()->TemporaryRegisterIsLive(reg) &&
             !allocator()->RegisterIsAllocatedInThisScope(reg)));
     result_register_ = reg;
     set_result_identified();
@@ -342,32 +542,36 @@
   Register result_register_;
 };
 
-
 BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
     : isolate_(isolate),
       zone_(zone),
-      builder_(isolate, zone),
+      builder_(nullptr),
       info_(nullptr),
       scope_(nullptr),
       globals_(0, zone),
       execution_control_(nullptr),
       execution_context_(nullptr),
       execution_result_(nullptr),
-      register_allocator_(nullptr) {
+      register_allocator_(nullptr),
+      try_catch_nesting_level_(0),
+      try_finally_nesting_level_(0) {
   InitializeAstVisitor(isolate);
 }
 
-
 Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
   set_info(info);
   set_scope(info->scope());
 
+  // Initialize bytecode array builder.
+  set_builder(new (zone()) BytecodeArrayBuilder(
+      isolate(), zone(), info->num_parameters_including_this(),
+      scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+
   // Initialize the incoming context.
   ContextScope incoming_context(this, scope(), false);
 
-  builder()->set_parameter_count(info->num_parameters_including_this());
-  builder()->set_locals_count(scope()->num_stack_slots());
-  builder()->set_context_count(scope()->MaxNestedContextChainLength());
+  // Initialize control scope.
+  ControlScopeForTopLevel control(this);
 
   // Build function context only if there are context allocated variables.
   if (scope()->NeedsContext()) {
@@ -380,9 +584,10 @@
     MakeBytecodeBody();
   }
 
+  builder()->EnsureReturn(info->literal());
   set_scope(nullptr);
   set_info(nullptr);
-  return builder_.ToBytecodeArray();
+  return builder()->ToBytecodeArray();
 }
 
 
@@ -390,11 +595,10 @@
   // Build the arguments object if it is used.
   VisitArgumentsObject(scope()->arguments());
 
-  // TODO(mythria): Build rest arguments array if it is used.
+  // Build rest arguments array if it is used.
   int rest_index;
-  if (scope()->rest_parameter(&rest_index)) {
-    UNIMPLEMENTED();
-  }
+  Variable* rest_parameter = scope()->rest_parameter(&rest_index);
+  VisitRestArgumentsArray(rest_parameter);
 
   // Build assignment to {.this_function} variable if it is used.
   VisitThisFunctionVariable(scope()->this_function_var());
@@ -409,37 +613,40 @@
 
   // Visit illegal re-declaration and bail out if it exists.
   if (scope()->HasIllegalRedeclaration()) {
-    Visit(scope()->GetIllegalRedeclaration());
+    VisitForEffect(scope()->GetIllegalRedeclaration());
     return;
   }
 
   // Visit declarations within the function scope.
   VisitDeclarations(scope()->declarations());
 
+  // Perform a stack-check before the body.
+  builder()->StackCheck();
+
   // Visit statements in the function body.
   VisitStatements(info()->literal()->body());
 }
 
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
-  BlockBuilder block_builder(this->builder());
-  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
-
-  if (stmt->scope() == NULL) {
-    // Visit statements in the same scope, no declarations.
-    VisitStatements(stmt->statements());
+  // Visit declarations and statements.
+  if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
+    VisitNewLocalBlockContext(stmt->scope());
+    ContextScope scope(this, stmt->scope());
+    VisitBlockDeclarationsAndStatements(stmt);
   } else {
-    // Visit declarations and statements in a block scope.
-    if (stmt->scope()->NeedsContext()) {
-      VisitNewLocalBlockContext(stmt->scope());
-      ContextScope scope(this, stmt->scope());
-      VisitDeclarations(stmt->scope()->declarations());
-      VisitStatements(stmt->statements());
-    } else {
-      VisitDeclarations(stmt->scope()->declarations());
-      VisitStatements(stmt->statements());
-    }
+    VisitBlockDeclarationsAndStatements(stmt);
   }
+}
+
+
+void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
+  BlockBuilder block_builder(builder());
+  ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+  if (stmt->scope() != nullptr) {
+    VisitDeclarations(stmt->scope()->declarations());
+  }
+  VisitStatements(stmt->statements());
   if (stmt->labels() != nullptr) block_builder.EndBlock();
 }
 
@@ -480,9 +687,31 @@
                                                   variable->index());
       }
       break;
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      DCHECK(IsDeclaredVariableMode(mode));
+
+      register_allocator()->PrepareForConsecutiveAllocations(3);
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register init_value = register_allocator()->NextConsecutiveRegister();
+      Register attributes = register_allocator()->NextConsecutiveRegister();
+
+      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+      if (hole_init) {
+        builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
+      } else {
+        // For variables, we must not use an initial value (such as 'undefined')
+        // because we may have a (legal) redeclaration and we must not destroy
+        // the current value.
+        builder()
+            ->LoadLiteral(Smi::FromInt(0))
+            .StoreAccumulatorInRegister(init_value);
+      }
+      builder()
+          ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+          .StoreAccumulatorInRegister(attributes)
+          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
       break;
+    }
   }
 }
 
@@ -503,7 +732,10 @@
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
       VisitForAccumulatorValue(decl->fun());
-      VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+      DCHECK(variable->mode() == LET || variable->mode() == VAR ||
+             variable->mode() == CONST);
+      VisitVariableAssignment(variable, Token::INIT,
+                              FeedbackVectorSlot::Invalid());
       break;
     }
     case VariableLocation::CONTEXT: {
@@ -513,8 +745,20 @@
                                   variable->index());
       break;
     }
-    case VariableLocation::LOOKUP:
-      UNIMPLEMENTED();
+    case VariableLocation::LOOKUP: {
+      register_allocator()->PrepareForConsecutiveAllocations(3);
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register literal = register_allocator()->NextConsecutiveRegister();
+      Register attributes = register_allocator()->NextConsecutiveRegister();
+      builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+
+      VisitForAccumulatorValue(decl->fun());
+      builder()
+          ->StoreAccumulatorInRegister(literal)
+          .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+          .StoreAccumulatorInRegister(attributes)
+          .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+    }
   }
 }
 
@@ -533,7 +777,10 @@
     ZoneList<Declaration*>* declarations) {
   RegisterAllocationScope register_scope(this);
   DCHECK(globals()->empty());
-  AstVisitor::VisitDeclarations(declarations);
+  for (int i = 0; i < declarations->length(); i++) {
+    RegisterAllocationScope register_scope(this);
+    Visit(declarations->at(i));
+  }
   if (globals()->empty()) return;
   int array_index = 0;
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
@@ -569,6 +816,7 @@
 
 
 void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  builder()->SetStatementPosition(stmt);
   VisitForEffect(stmt->expression());
 }
 
@@ -624,12 +872,16 @@
 
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   VisitForAccumulatorValue(stmt->expression());
-  builder()->Return();
+  builder()->SetStatementPosition(stmt);
+  execution_control()->ReturnAccumulator();
 }
 
 
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
-  UNIMPLEMENTED();
+  VisitForAccumulatorValue(stmt->expression());
+  builder()->CastAccumulatorToJSObject();
+  VisitNewLocalWithContext();
+  VisitInScope(stmt->statement(), stmt->scope());
 }
 
 
@@ -657,8 +909,7 @@
 
     // Perform label comparison as if via '===' with tag.
     VisitForAccumulatorValue(clause->label());
-    builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
-                                language_mode_strength());
+    builder()->CompareOperation(Token::Value::EQ_STRICT, tag);
     switch_builder.Case(i);
   }
 
@@ -688,20 +939,25 @@
   UNREACHABLE();
 }
 
+void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
+                                           LoopBuilder* loop_builder) {
+  ControlScopeForIteration execution_control(this, stmt, loop_builder);
+  builder()->StackCheck();
+  Visit(stmt->body());
+}
 
 void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
   loop_builder.LoopHeader();
   if (stmt->cond()->ToBooleanIsFalse()) {
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
   } else if (stmt->cond()->ToBooleanIsTrue()) {
     loop_builder.Condition();
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.JumpToHeader();
   } else {
-    Visit(stmt->body());
+    VisitIterationBody(stmt, &loop_builder);
     loop_builder.Condition();
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.JumpToHeaderIfTrue();
@@ -709,7 +965,6 @@
   loop_builder.EndLoop();
 }
 
-
 void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   if (stmt->cond()->ToBooleanIsFalse()) {
     // If the condition is false there is no need to generate the loop.
@@ -717,14 +972,13 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (!stmt->cond()->ToBooleanIsTrue()) {
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   loop_builder.JumpToHeader();
   loop_builder.EndLoop();
 }
@@ -741,15 +995,13 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
   loop_builder.LoopHeader();
   loop_builder.Condition();
   if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
     VisitForAccumulatorValue(stmt->cond());
     loop_builder.BreakIfFalse();
   }
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   if (stmt->next() != nullptr) {
     loop_builder.Next();
     Visit(stmt->next());
@@ -770,7 +1022,7 @@
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, slot);
+      VisitVariableAssignment(variable, Token::ASSIGN, slot);
       break;
     }
     case NAMED_PROPERTY: {
@@ -795,9 +1047,40 @@
                                     language_mode());
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      Register receiver = register_allocator()->NextConsecutiveRegister();
+      Register home_object = register_allocator()->NextConsecutiveRegister();
+      Register name = register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), receiver);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(name);
+      BuildNamedSuperPropertyStore(receiver, home_object, name, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      RegisterAllocationScope register_scope(this);
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      Register receiver = register_allocator()->NextConsecutiveRegister();
+      Register home_object = register_allocator()->NextConsecutiveRegister();
+      Register key = register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), receiver);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      BuildKeyedSuperPropertyStore(receiver, home_object, key, value);
+      break;
+    }
   }
 }
 
@@ -810,7 +1093,6 @@
   }
 
   LoopBuilder loop_builder(builder());
-  ControlScopeForIteration control_scope(this, stmt, &loop_builder);
   BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
 
   // Prepare the state for executing ForIn.
@@ -821,10 +1103,14 @@
   builder()->CastAccumulatorToJSObject();
   builder()->JumpIfNull(&not_object_label);
   builder()->StoreAccumulatorInRegister(receiver);
-  Register cache_type = register_allocator()->NewRegister();
-  Register cache_array = register_allocator()->NewRegister();
-  Register cache_length = register_allocator()->NewRegister();
-  builder()->ForInPrepare(cache_type, cache_array, cache_length);
+
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+  Register cache_type = register_allocator()->NextConsecutiveRegister();
+  Register cache_array = register_allocator()->NextConsecutiveRegister();
+  Register cache_length = register_allocator()->NextConsecutiveRegister();
+  // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+  USE(cache_array);
+  builder()->ForInPrepare(cache_type);
 
   // Set up loop counter
   Register index = register_allocator()->NewRegister();
@@ -836,10 +1122,11 @@
   loop_builder.Condition();
   builder()->ForInDone(index, cache_length);
   loop_builder.BreakIfTrue();
-  builder()->ForInNext(receiver, cache_type, cache_array, index);
+  DCHECK(Register::AreContiguous(cache_type, cache_array));
+  builder()->ForInNext(receiver, index, cache_type);
   loop_builder.ContinueIfUndefined();
   VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
-  Visit(stmt->body());
+  VisitIterationBody(stmt, &loop_builder);
   loop_builder.Next();
   builder()->ForInStep(index);
   builder()->StoreAccumulatorInRegister(index);
@@ -852,31 +1139,127 @@
 
 
 void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  UNIMPLEMENTED();
+  LoopBuilder loop_builder(builder());
+  ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+
+  VisitForEffect(stmt->assign_iterator());
+
+  loop_builder.LoopHeader();
+  loop_builder.Next();
+  VisitForEffect(stmt->next_result());
+  VisitForAccumulatorValue(stmt->result_done());
+  loop_builder.BreakIfTrue();
+
+  VisitForEffect(stmt->assign_each());
+  VisitIterationBody(stmt, &loop_builder);
+  loop_builder.JumpToHeader();
+  loop_builder.EndLoop();
 }
 
 
 void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  if (FLAG_ignition_fake_try_catch) {
+  TryCatchBuilder try_control_builder(builder());
+  Register no_reg;
+
+  // Preserve the context in a dedicated register, so that it can be restored
+  // when the handler is entered by the stack-unwinding machinery.
+  // TODO(mstarzinger): Be smarter about register allocation.
+  Register context = register_allocator()->NewRegister();
+  builder()->MoveRegister(Register::current_context(), context);
+
+  // Evaluate the try-block inside a control scope. This simulates a handler
+  // that is intercepting 'throw' control commands.
+  try_control_builder.BeginTry(context);
+  {
+    ControlScopeForTryCatch scope(this, &try_control_builder);
     Visit(stmt->try_block());
-    return;
   }
-  UNIMPLEMENTED();
+  try_control_builder.EndTry();
+
+  // Create a catch scope that binds the exception.
+  VisitNewLocalCatchContext(stmt->variable());
+  builder()->StoreAccumulatorInRegister(context);
+
+  // Clear message object as we enter the catch block.
+  builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+
+  // Load the catch context into the accumulator.
+  builder()->LoadAccumulatorWithRegister(context);
+
+  // Evaluate the catch-block.
+  VisitInScope(stmt->catch_block(), stmt->scope());
+  try_control_builder.EndCatch();
 }
 
 
 void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  if (FLAG_ignition_fake_try_catch) {
+  TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
+  Register no_reg;
+
+  // We keep a record of all paths that enter the finally-block to be able to
+  // dispatch to the correct continuation point after the statements in the
+  // finally-block have been evaluated.
+  //
+  // The try-finally construct can enter the finally-block in three ways:
+  // 1. By exiting the try-block normally, falling through at the end.
+  // 2. By exiting the try-block with a function-local control flow transfer
+  //    (i.e. through break/continue/return statements).
+  // 3. By exiting the try-block with a thrown exception.
+  //
+  // The result register semantics depend on how the block was entered:
+  //  - ReturnStatement: It represents the return value being returned.
+  //  - ThrowStatement: It represents the exception being thrown.
+  //  - BreakStatement/ContinueStatement: Undefined and not used.
+  //  - Falling through into finally-block: Undefined and not used.
+  Register token = register_allocator()->NewRegister();
+  Register result = register_allocator()->NewRegister();
+  ControlScope::DeferredCommands commands(this, token, result);
+
+  // Preserve the context in a dedicated register, so that it can be restored
+  // when the handler is entered by the stack-unwinding machinery.
+  // TODO(mstarzinger): Be smarter about register allocation.
+  Register context = register_allocator()->NewRegister();
+  builder()->MoveRegister(Register::current_context(), context);
+
+  // Evaluate the try-block inside a control scope. This simulates a handler
+  // that is intercepting all control commands.
+  try_control_builder.BeginTry(context);
+  {
+    ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
     Visit(stmt->try_block());
-    Visit(stmt->finally_block());
-    return;
   }
-  UNIMPLEMENTED();
+  try_control_builder.EndTry();
+
+  // Record fall-through and exception cases.
+  commands.RecordFallThroughPath();
+  try_control_builder.LeaveTry();
+  try_control_builder.BeginHandler();
+  commands.RecordHandlerReThrowPath();
+
+  // Pending message object is saved on entry.
+  try_control_builder.BeginFinally();
+  Register message = context;  // Reuse register.
+
+  // Clear message object as we enter the finally block.
+  builder()
+      ->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0)
+      .StoreAccumulatorInRegister(message);
+
+  // Evaluate the finally-block.
+  Visit(stmt->finally_block());
+  try_control_builder.EndFinally();
+
+  // Pending message object is restored on exit.
+  builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message, 1);
+
+  // Dynamic dispatch after the finally-block.
+  commands.ApplyDeferredCommands();
 }
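The pending-message protocol above, reduced to its shape; Message, ClearPendingMessage, and SetPendingMessage are hypothetical stand-ins for the two runtime calls, and the rethrow path (which flows through the token dispatch) is elided:

    // Sketch: the finally-block must not observe or clobber an in-flight
    // exception's message, so it runs bracketed by a save and a restore.
    struct Message {};
    Message ClearPendingMessage();       // Runtime::kInterpreterClearPendingMessage
    void SetPendingMessage(Message);     // Runtime::kInterpreterSetPendingMessage

    template <typename Fn>
    void RunFinallyBlock(Fn finally_block) {
      Message saved = ClearPendingMessage();
      finally_block();                   // evaluate the finally-block body
      SetPendingMessage(saved);
    }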
 
 
 void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  UNIMPLEMENTED();
+  builder()->SetStatementPosition(stmt);
+  builder()->Debugger();
 }
 
 
@@ -892,18 +1275,166 @@
 
 
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
-  UNIMPLEMENTED();
+  if (expr->scope()->ContextLocalCount() > 0) {
+    VisitNewLocalBlockContext(expr->scope());
+    ContextScope scope(this, expr->scope());
+    VisitDeclarations(expr->scope()->declarations());
+    VisitClassLiteralContents(expr);
+  } else {
+    VisitDeclarations(expr->scope()->declarations());
+    VisitClassLiteralContents(expr);
+  }
 }
 
+void BytecodeGenerator::VisitClassLiteralContents(ClassLiteral* expr) {
+  VisitClassLiteralForRuntimeDefinition(expr);
+
+  // Load the "prototype" from the constructor.
+  register_allocator()->PrepareForConsecutiveAllocations(2);
+  Register literal = register_allocator()->NextConsecutiveRegister();
+  Register prototype = register_allocator()->NextConsecutiveRegister();
+  Handle<String> name = isolate()->factory()->prototype_string();
+  FeedbackVectorSlot slot = expr->PrototypeSlot();
+  builder()
+      ->StoreAccumulatorInRegister(literal)
+      .LoadNamedProperty(literal, name, feedback_index(slot))
+      .StoreAccumulatorInRegister(prototype);
+
+  VisitClassLiteralProperties(expr, literal, prototype);
+  builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+  // Assign to class variable.
+  if (expr->class_variable_proxy() != nullptr) {
+    Variable* var = expr->class_variable_proxy()->var();
+    FeedbackVectorSlot slot = expr->NeedsProxySlot()
+                                  ? expr->ProxySlot()
+                                  : FeedbackVectorSlot::Invalid();
+    VisitVariableAssignment(var, Token::INIT, slot);
+  }
+  execution_result()->SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
+    ClassLiteral* expr) {
+  AccumulatorResultScope result_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(4);
+  Register extends = register_allocator()->NextConsecutiveRegister();
+  Register constructor = register_allocator()->NextConsecutiveRegister();
+  Register start_position = register_allocator()->NextConsecutiveRegister();
+  Register end_position = register_allocator()->NextConsecutiveRegister();
+
+  VisitForAccumulatorValueOrTheHole(expr->extends());
+  builder()->StoreAccumulatorInRegister(extends);
+
+  VisitForAccumulatorValue(expr->constructor());
+  builder()
+      ->StoreAccumulatorInRegister(constructor)
+      .LoadLiteral(Smi::FromInt(expr->start_position()))
+      .StoreAccumulatorInRegister(start_position)
+      .LoadLiteral(Smi::FromInt(expr->end_position()))
+      .StoreAccumulatorInRegister(end_position)
+      .CallRuntime(Runtime::kDefineClass, extends, 4);
+  result_scope.SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
+                                                    Register literal,
+                                                    Register prototype) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(5);
+  Register receiver = register_allocator()->NextConsecutiveRegister();
+  Register key = register_allocator()->NextConsecutiveRegister();
+  Register value = register_allocator()->NextConsecutiveRegister();
+  Register attr = register_allocator()->NextConsecutiveRegister();
+  Register set_function_name = register_allocator()->NextConsecutiveRegister();
+
+  bool attr_assigned = false;
+  Register old_receiver = Register::invalid_value();
+
+  // Create nodes to store method values into the literal.
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+
+    // Set-up receiver.
+    Register new_receiver = property->is_static() ? literal : prototype;
+    if (new_receiver != old_receiver) {
+      builder()->MoveRegister(new_receiver, receiver);
+      old_receiver = new_receiver;
+    }
+
+    VisitForAccumulatorValue(property->key());
+    builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+    // The static prototype property is read-only. We handle the non-computed
+    // property name case in the parser. Since this is the only case where we
+    // need to check for an own read-only property, we special-case it here so
+    // we do not need to do this check for every property.
+    if (property->is_static() && property->is_computed_name()) {
+      VisitClassLiteralStaticPrototypeWithComputedName(key);
+    }
+    VisitForAccumulatorValue(property->value());
+    builder()->StoreAccumulatorInRegister(value);
+
+    VisitSetHomeObject(value, receiver, property);
+
+    if (!attr_assigned) {
+      builder()
+          ->LoadLiteral(Smi::FromInt(DONT_ENUM))
+          .StoreAccumulatorInRegister(attr);
+      attr_assigned = true;
+    }
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Invalid properties for ES6 classes.
+        UNREACHABLE();
+        break;
+      case ObjectLiteral::Property::COMPUTED: {
+        builder()
+            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+            .StoreAccumulatorInRegister(set_function_name);
+        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, receiver,
+                               5);
+        break;
+      }
+      case ObjectLiteral::Property::GETTER: {
+        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+                               receiver, 4);
+        break;
+      }
+      case ObjectLiteral::Property::SETTER: {
+        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+                               receiver, 4);
+        break;
+      }
+    }
+  }
+}
+
+void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
+    Register key) {
+  BytecodeLabel done;
+  builder()
+      ->LoadLiteral(isolate()->factory()->prototype_string())
+      .CompareOperation(Token::Value::EQ_STRICT, key)
+      .JumpIfFalse(&done)
+      .CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
+      .Bind(&done);
+}
 
 void BytecodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
-  UNIMPLEMENTED();
+  // Find or build a shared function info for the native function template.
+  Handle<SharedFunctionInfo> shared_info =
+      Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
+  builder()->CreateClosure(shared_info, NOT_TENURED);
+  execution_result()->SetResultInAccumulator();
 }
 
 
 void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
-  UNIMPLEMENTED();
+  VisitBlock(expr->block());
+  VisitVariableProxy(expr->result());
 }
 
 
@@ -964,10 +1495,13 @@
   builder()->CreateObjectLiteral(expr->constant_properties(),
                                  expr->literal_index(),
                                  expr->ComputeFlags(true));
-  Register literal;
+
+  // Allocate in the outer scope since this register is used to return the
+  // expression's result to the caller.
+  Register literal = register_allocator()->outer()->NewRegister();
+  builder()->StoreAccumulatorInRegister(literal);
 
   // Store computed values into the literal.
-  bool literal_in_accumulator = true;
   int property_index = 0;
   AccessorTable accessor_table(zone());
   for (; property_index < expr->properties()->length(); property_index++) {
@@ -975,12 +1509,6 @@
     if (property->is_computed_name()) break;
     if (property->IsCompileTimeValue()) continue;
 
-    if (literal_in_accumulator) {
-      literal = register_allocator()->NewRegister();
-      builder()->StoreAccumulatorInRegister(literal);
-      literal_in_accumulator = false;
-    }
-
     RegisterAllocationScope inner_register_scope(this);
     Literal* literal_key = property->key()->AsLiteral();
     switch (property->kind()) {
@@ -995,21 +1523,31 @@
         if (literal_key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(property->value());
-            builder()->StoreNamedProperty(
-                literal, literal_key->AsPropertyName(),
-                feedback_index(property->GetSlot(0)), language_mode());
+            if (FunctionLiteral::NeedsHomeObject(property->value())) {
+              RegisterAllocationScope register_scope(this);
+              Register value = register_allocator()->NewRegister();
+              builder()->StoreAccumulatorInRegister(value);
+              builder()->StoreNamedProperty(
+                  literal, literal_key->AsPropertyName(),
+                  feedback_index(property->GetSlot(0)), language_mode());
+              VisitSetHomeObject(value, literal, property, 1);
+            } else {
+              builder()->StoreNamedProperty(
+                  literal, literal_key->AsPropertyName(),
+                  feedback_index(property->GetSlot(0)), language_mode());
+            }
           } else {
             VisitForEffect(property->value());
           }
         } else {
-          register_allocator()->PrepareForConsecutiveAllocations(3);
+          register_allocator()->PrepareForConsecutiveAllocations(4);
+          Register literal_argument =
+              register_allocator()->NextConsecutiveRegister();
           Register key = register_allocator()->NextConsecutiveRegister();
           Register value = register_allocator()->NextConsecutiveRegister();
           Register language = register_allocator()->NextConsecutiveRegister();
-          // TODO(oth): This is problematic - can't assume contiguous here.
-          // literal is allocated in outer register scope, whereas key, value,
-          // language are in another.
-          DCHECK(Register::AreContiguous(literal, key, value, language));
+
+          builder()->MoveRegister(literal, literal_argument);
           VisitForAccumulatorValue(property->key());
           builder()->StoreAccumulatorInRegister(key);
           VisitForAccumulatorValue(property->value());
@@ -1018,20 +1556,23 @@
             builder()
                 ->LoadLiteral(Smi::FromInt(SLOPPY))
                 .StoreAccumulatorInRegister(language)
-                .CallRuntime(Runtime::kSetProperty, literal, 4);
+                .CallRuntime(Runtime::kSetProperty, literal_argument, 4);
             VisitSetHomeObject(value, literal, property);
           }
         }
         break;
       }
       case ObjectLiteral::Property::PROTOTYPE: {
-        register_allocator()->PrepareForConsecutiveAllocations(1);
         DCHECK(property->emit_store());
+        register_allocator()->PrepareForConsecutiveAllocations(2);
+        Register literal_argument =
+            register_allocator()->NextConsecutiveRegister();
         Register value = register_allocator()->NextConsecutiveRegister();
-        DCHECK(Register::AreContiguous(literal, value));
+
+        builder()->MoveRegister(literal, literal_argument);
         VisitForAccumulatorValue(property->value());
         builder()->StoreAccumulatorInRegister(value).CallRuntime(
-            Runtime::kInternalSetPrototype, literal, 2);
+            Runtime::kInternalSetPrototype, literal_argument, 2);
         break;
       }
       case ObjectLiteral::Property::GETTER:
@@ -1052,12 +1593,14 @@
   for (AccessorTable::Iterator it = accessor_table.begin();
        it != accessor_table.end(); ++it) {
     RegisterAllocationScope inner_register_scope(this);
-    register_allocator()->PrepareForConsecutiveAllocations(4);
+    register_allocator()->PrepareForConsecutiveAllocations(5);
+    Register literal_argument = register_allocator()->NextConsecutiveRegister();
     Register name = register_allocator()->NextConsecutiveRegister();
     Register getter = register_allocator()->NextConsecutiveRegister();
     Register setter = register_allocator()->NextConsecutiveRegister();
     Register attr = register_allocator()->NextConsecutiveRegister();
-    DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+
+    builder()->MoveRegister(literal, literal_argument);
     VisitForAccumulatorValue(it->first);
     builder()->StoreAccumulatorInRegister(name);
     VisitObjectLiteralAccessor(literal, it->second->getter, getter);
@@ -1065,7 +1608,8 @@
     builder()
         ->LoadLiteral(Smi::FromInt(NONE))
         .StoreAccumulatorInRegister(attr)
-        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+        .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked,
+                     literal_argument, 5);
   }
 
   // Object literals have two parts. The "static" part on the left contains no
@@ -1078,67 +1622,69 @@
   // compile them into a series of "SetOwnProperty" runtime calls. This will
   // preserve insertion order.
   for (; property_index < expr->properties()->length(); property_index++) {
-    if (literal_in_accumulator) {
-      literal = register_allocator()->NewRegister();
-      builder()->StoreAccumulatorInRegister(literal);
-      literal_in_accumulator = false;
-    }
-
     ObjectLiteral::Property* property = expr->properties()->at(property_index);
     RegisterAllocationScope inner_register_scope(this);
+
     if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
       DCHECK(property->emit_store());
-      Register value = register_allocator()->NewRegister();
-      DCHECK(Register::AreContiguous(literal, value));
+      register_allocator()->PrepareForConsecutiveAllocations(2);
+      Register literal_argument =
+          register_allocator()->NextConsecutiveRegister();
+      Register value = register_allocator()->NextConsecutiveRegister();
+
+      builder()->MoveRegister(literal, literal_argument);
       VisitForAccumulatorValue(property->value());
       builder()->StoreAccumulatorInRegister(value).CallRuntime(
-          Runtime::kInternalSetPrototype, literal, 2);
+          Runtime::kInternalSetPrototype, literal_argument, 2);
       continue;
     }
 
-    register_allocator()->PrepareForConsecutiveAllocations(3);
+    register_allocator()->PrepareForConsecutiveAllocations(5);
+    Register literal_argument = register_allocator()->NextConsecutiveRegister();
     Register key = register_allocator()->NextConsecutiveRegister();
     Register value = register_allocator()->NextConsecutiveRegister();
     Register attr = register_allocator()->NextConsecutiveRegister();
-    DCHECK(Register::AreContiguous(literal, key, value, attr));
+    DCHECK(Register::AreContiguous(literal_argument, key, value, attr));
+    Register set_function_name =
+        register_allocator()->NextConsecutiveRegister();
 
+    builder()->MoveRegister(literal, literal_argument);
     VisitForAccumulatorValue(property->key());
     builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
     VisitForAccumulatorValue(property->value());
     builder()->StoreAccumulatorInRegister(value);
     VisitSetHomeObject(value, literal, property);
     builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
-    Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        function_id = Runtime::kDefineDataPropertyUnchecked;
+        builder()
+            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+            .StoreAccumulatorInRegister(set_function_name);
+        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral,
+                               literal_argument, 5);
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();  // Handled specially above.
         break;
       case ObjectLiteral::Property::GETTER:
-        function_id = Runtime::kDefineGetterPropertyUnchecked;
+        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+                               literal_argument, 4);
         break;
       case ObjectLiteral::Property::SETTER:
-        function_id = Runtime::kDefineSetterPropertyUnchecked;
+        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+                               literal_argument, 4);
         break;
     }
-    builder()->CallRuntime(function_id, literal, 4);
   }
 
   // Transform literals that contain functions to fast properties.
   if (expr->has_function()) {
-    DCHECK(!literal_in_accumulator);
     builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
   }
 
-  if (!literal_in_accumulator) {
-    // Restore literal array into accumulator.
-    builder()->LoadAccumulatorWithRegister(literal);
-  }
-  execution_result()->SetResultInAccumulator();
+  execution_result()->SetResultInRegister(literal);
 }
 
 
@@ -1156,10 +1702,7 @@
        array_index++) {
     Expression* subexpr = expr->values()->at(array_index);
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-    if (subexpr->IsSpread()) {
-      // TODO(rmcilroy): Deal with spread expressions.
-      UNIMPLEMENTED();
-    }
+    DCHECK(!subexpr->IsSpread());
 
     if (literal_in_accumulator) {
       index = register_allocator()->NewRegister();
@@ -1189,14 +1732,25 @@
   VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
 }
 
+void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
+                                                      Handle<String> name) {
+  if (mode == CONST_LEGACY) {
+    BytecodeLabel end_label;
+    builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
+  } else if (mode == LET || mode == CONST) {
+    BuildThrowIfHole(name);
+  }
+}
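The three load behaviors above as a runnable model, using a boolean for "the slot still holds the hole" (illustrative names; V8 uses a dedicated hole value and real undefined, not the stand-ins here):

    #include <stdexcept>

    enum class Mode { kVar, kConstLegacy, kLet, kConst };

    // 'is_hole' models a binding that is still in its temporal dead zone.
    int LoadChecked(bool is_hole, int value, Mode mode) {
      if (is_hole) {
        if (mode == Mode::kConstLegacy) return 0;  // models LoadUndefined()
        if (mode == Mode::kLet || mode == Mode::kConst) {
          throw std::runtime_error("ReferenceError: use before declaration");
        }
      }
      return value;  // kVar, or an already-initialized binding
    }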
 
 void BytecodeGenerator::VisitVariableLoad(Variable* variable,
                                           FeedbackVectorSlot slot,
                                           TypeofMode typeof_mode) {
+  VariableMode mode = variable->mode();
   switch (variable->location()) {
     case VariableLocation::LOCAL: {
       Register source(Register(variable->index()));
       builder()->LoadAccumulatorWithRegister(source);
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1205,13 +1759,14 @@
       // index -1 but is parameter index 0 in BytecodeArrayBuilder).
       Register source = builder()->Parameter(variable->index() + 1);
       builder()->LoadAccumulatorWithRegister(source);
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
       break;
     }
     case VariableLocation::GLOBAL:
     case VariableLocation::UNALLOCATED: {
       builder()->LoadGlobal(variable->name(), feedback_index(slot),
-                            language_mode(), typeof_mode);
+                            typeof_mode);
       execution_result()->SetResultInAccumulator();
       break;
     }
@@ -1237,10 +1792,10 @@
               .StoreAccumulatorInRegister(context_reg);
         }
       }
+
       builder()->LoadContextSlot(context_reg, variable->index());
+      BuildHoleCheckForVariableLoad(mode, variable->name());
       execution_result()->SetResultInAccumulator();
-      // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
-      // let variables.
       break;
     }
     case VariableLocation::LOOKUP: {
@@ -1251,14 +1806,12 @@
   }
 }
 
-
 void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
     Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
   AccumulatorResultScope accumulator_result(this);
   VisitVariableLoad(variable, slot, typeof_mode);
 }
 
-
 Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
     Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
   RegisterResultScope register_scope(this);
@@ -1266,20 +1819,150 @@
   return register_scope.ResultRegister();
 }
 
+void BytecodeGenerator::BuildNamedSuperPropertyLoad(Register receiver,
+                                                    Register home_object,
+                                                    Register name) {
+  DCHECK(Register::AreContiguous(receiver, home_object, name));
+  builder()->CallRuntime(Runtime::kLoadFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyLoad(Register receiver,
+                                                    Register home_object,
+                                                    Register key) {
+  DCHECK(Register::AreContiguous(receiver, home_object, key));
+  builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildNamedSuperPropertyStore(Register receiver,
+                                                     Register home_object,
+                                                     Register name,
+                                                     Register value) {
+  DCHECK(Register::AreContiguous(receiver, home_object, name, value));
+  Runtime::FunctionId function_id = is_strict(language_mode())
+                                        ? Runtime::kStoreToSuper_Strict
+                                        : Runtime::kStoreToSuper_Sloppy;
+  builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
+                                                     Register home_object,
+                                                     Register key,
+                                                     Register value) {
+  DCHECK(Register::AreContiguous(receiver, home_object, key, value));
+  Runtime::FunctionId function_id = is_strict(language_mode())
+                                        ? Runtime::kStoreKeyedToSuper_Strict
+                                        : Runtime::kStoreKeyedToSuper_Sloppy;
+  builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
+  RegisterAllocationScope register_scope(this);
+  Register name_reg = register_allocator()->NewRegister();
+  builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
+      Runtime::kThrowReferenceError, name_reg, 1);
+}
+
+void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
+  // TODO(interpreter): Can the parser reduce the number of checks
+  // performed? Or should there be a ThrowIfHole bytecode?
+  BytecodeLabel no_reference_error;
+  builder()->JumpIfNotHole(&no_reference_error);
+  BuildThrowReferenceError(name);
+  builder()->Bind(&no_reference_error);
+}
+
+void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
+  // TODO(interpreter): Can the parser reduce the number of checks
+  // performed? Or should there be a ThrowIfNotHole bytecode?
+  BytecodeLabel no_reference_error, reference_error;
+  builder()
+      ->JumpIfNotHole(&reference_error)
+      .Jump(&no_reference_error)
+      .Bind(&reference_error);
+  BuildThrowReferenceError(name);
+  builder()->Bind(&no_reference_error);
+}
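BuildThrowIfNotHole has only JumpIfNotHole available, so it inverts the test with a jump-over-jump; the emitted control flow is equivalent to this straight-line sketch (the throw models BuildThrowReferenceError):

    #include <stdexcept>

    void ThrowIfNotHole(bool is_hole) {
      if (!is_hole) goto reference_error;  // JumpIfNotHole(&reference_error)
      goto done;                           // Jump(&no_reference_error)
    reference_error:
      throw std::runtime_error("ReferenceError");
    done:
      return;
    }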
+
+void BytecodeGenerator::BuildThrowReassignConstant(Handle<String> name) {
+  // TODO(mythria): This will be replaced by a new bytecode that throws an
+  // appropriate error depending on whether the value is a hole or not.
+  BytecodeLabel const_assign_error;
+  builder()->JumpIfNotHole(&const_assign_error);
+  BuildThrowReferenceError(name);
+  builder()
+      ->Bind(&const_assign_error)
+      .CallRuntime(Runtime::kThrowConstAssignError, Register(), 0);
+}
+
+void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
+                                                            Token::Value op) {
+  VariableMode mode = variable->mode();
+  DCHECK(mode != CONST_LEGACY);
+  if (mode == CONST && op != Token::INIT) {
+    // Non-initializing assignments to constants are not allowed.
+    BuildThrowReassignConstant(variable->name());
+  } else if (mode == LET && op != Token::INIT) {
+    // Perform an initialization check for let declared variables.
+    // E.g. let x = (x = 20); is not allowed.
+    BuildThrowIfHole(variable->name());
+  } else {
+    DCHECK(variable->is_this() && mode == CONST && op == Token::INIT);
+    // Perform an initialization check for 'this'. The 'this' variable is
+    // the only variable able to trigger bind operations outside the TDZ
+    // via 'super' calls.
+    BuildThrowIfNotHole(variable->name());
+  }
+}
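The guard computed a few lines below (hole_check_required in VisitVariableAssignment) in isolation; is_init corresponds to Token::INIT, the declaration's own initializing store:

    enum class Mode { kVar, kConstLegacy, kLet, kConst };  // as in the load sketch

    bool HoleCheckRequired(Mode mode, bool is_init, bool is_this) {
      return mode == Mode::kConstLegacy ||
             (mode == Mode::kLet && !is_init) ||
             (mode == Mode::kConst && !is_init) ||
             (mode == Mode::kConst && is_init && is_this);
    }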
 
 void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+                                                Token::Value op,
                                                 FeedbackVectorSlot slot) {
+  VariableMode mode = variable->mode();
+  RegisterAllocationScope assignment_register_scope(this);
+  BytecodeLabel end_label;
+  bool hole_check_required =
+      (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
+      (mode == CONST && op != Token::INIT) ||
+      (mode == CONST && op == Token::INIT && variable->is_this());
   switch (variable->location()) {
+    case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
-      // TODO(rmcilroy): support const mode initialization.
-      Register destination(variable->index());
-      builder()->StoreAccumulatorInRegister(destination);
-      break;
-    }
-    case VariableLocation::PARAMETER: {
-      // The parameter indices are shifted by 1 (receiver is variable
-      // index -1 but is parameter index 0 in BytecodeArrayBuilder).
-      Register destination(builder()->Parameter(variable->index() + 1));
+      Register destination;
+      if (VariableLocation::PARAMETER == variable->location()) {
+        destination = Register(builder()->Parameter(variable->index() + 1));
+      } else {
+        destination = Register(variable->index());
+      }
+
+      if (hole_check_required) {
+        // Load destination to check for hole.
+        Register value_temp = register_allocator()->NewRegister();
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadAccumulatorWithRegister(destination);
+
+        if (mode == CONST_LEGACY && op == Token::INIT) {
+          // Perform an initialization check for legacy constants.
+          builder()
+              ->JumpIfNotHole(&end_label)
+              .MoveRegister(value_temp, destination)
+              .Bind(&end_label)
+              .LoadAccumulatorWithRegister(value_temp);
+          // Break here because the value should not be stored unconditionally.
+          break;
+        } else if (mode == CONST_LEGACY && op != Token::INIT) {
+          DCHECK(!is_strict(language_mode()));
+          // Ensure accumulator is in the correct state.
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here; non-initializing assignments to legacy constants
+          // are ignored.
+          break;
+        } else {
+          BuildHoleCheckForVariableAssignment(variable, op);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+        }
+      }
+
       builder()->StoreAccumulatorInRegister(destination);
       break;
     }
@@ -1290,10 +1973,10 @@
       break;
     }
     case VariableLocation::CONTEXT: {
-      // TODO(rmcilroy): support const mode initialization.
       int depth = execution_context()->ContextChainDepth(variable->scope());
       ContextScope* context = execution_context()->Previous(depth);
       Register context_reg;
+
       if (context) {
         context_reg = context->reg();
       } else {
@@ -1315,11 +1998,63 @@
         }
         builder()->LoadAccumulatorWithRegister(value_temp);
       }
+
+      if (hole_check_required) {
+        // Load destination to check for hole.
+        Register value_temp = register_allocator()->NewRegister();
+        builder()
+            ->StoreAccumulatorInRegister(value_temp)
+            .LoadContextSlot(context_reg, variable->index());
+
+        if (mode == CONST_LEGACY && op == Token::INIT) {
+          // Perform an initialization check for legacy constants.
+          builder()
+              ->JumpIfNotHole(&end_label)
+              .LoadAccumulatorWithRegister(value_temp)
+              .StoreContextSlot(context_reg, variable->index())
+              .Bind(&end_label);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here because the code above already performs the store
+          // conditionally; the value must not be stored again.
+          break;
+        } else if (mode == CONST_LEGACY && op != Token::INIT) {
+          DCHECK(!is_strict(language_mode()));
+          // Ensure accumulator is in the correct state.
+          builder()->LoadAccumulatorWithRegister(value_temp);
+          // Break here; non-initializing assignments to legacy constants
+          // are ignored.
+          break;
+        } else {
+          BuildHoleCheckForVariableAssignment(variable, op);
+          builder()->LoadAccumulatorWithRegister(value_temp);
+        }
+      }
+
       builder()->StoreContextSlot(context_reg, variable->index());
       break;
     }
     case VariableLocation::LOOKUP: {
-      builder()->StoreLookupSlot(variable->name(), language_mode());
+      if (mode == CONST_LEGACY && op == Token::INIT) {
+        register_allocator()->PrepareForConsecutiveAllocations(3);
+        Register value = register_allocator()->NextConsecutiveRegister();
+        Register context = register_allocator()->NextConsecutiveRegister();
+        Register name = register_allocator()->NextConsecutiveRegister();
+
+        // The InitializeLegacyConstLookupSlot runtime call returns the
+        // 'value' passed to it, so the accumulator retains its original
+        // contents when the runtime call returns.
+        builder()
+            ->StoreAccumulatorInRegister(value)
+            .MoveRegister(execution_context()->reg(), context)
+            .LoadLiteral(variable->name())
+            .StoreAccumulatorInRegister(name)
+            .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
+      } else if (mode == CONST_LEGACY && op != Token::INIT) {
+        // Non-initializing assignments to legacy constants are ignored.
+        DCHECK(!is_strict(language_mode()));
+      } else {
+        builder()->StoreLookupSlot(variable->name(), language_mode());
+      }
       break;
     }
   }
@@ -1327,8 +2062,8 @@
 
 
 void BytecodeGenerator::VisitAssignment(Assignment* expr) {
-  DCHECK(expr->target()->IsValidReferenceExpression());
-  Register object, key;
+  DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+  Register object, key, home_object, value;
   Handle<String> name;
 
   // Left-hand side can only be a property, a global or a variable slot.
@@ -1358,9 +2093,35 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(key);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      break;
+    }
   }
 
   // Evaluate the value and potentially handle compound assignments by loading
@@ -1378,8 +2139,7 @@
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
         old_value = register_allocator()->NewRegister();
         builder()
-            ->LoadNamedProperty(object, name, feedback_index(slot),
-                                language_mode())
+            ->LoadNamedProperty(object, name, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
         break;
       }
@@ -1389,18 +2149,25 @@
         FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
         old_value = register_allocator()->NewRegister();
         builder()
-            ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+            ->LoadKeyedProperty(object, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
         break;
       }
-      case NAMED_SUPER_PROPERTY:
-      case KEYED_SUPER_PROPERTY:
-        UNIMPLEMENTED();
+      case NAMED_SUPER_PROPERTY: {
+        old_value = register_allocator()->NewRegister();
+        BuildNamedSuperPropertyLoad(object, home_object, key);
+        builder()->StoreAccumulatorInRegister(old_value);
         break;
+      }
+      case KEYED_SUPER_PROPERTY: {
+        old_value = register_allocator()->NewRegister();
+        BuildKeyedSuperPropertyLoad(object, home_object, key);
+        builder()->StoreAccumulatorInRegister(old_value);
+        break;
+      }
     }
     VisitForAccumulatorValue(expr->value());
-    builder()->BinaryOperation(expr->binary_op(), old_value,
-                               language_mode_strength());
+    builder()->BinaryOperation(expr->binary_op(), old_value);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1412,7 +2179,7 @@
       // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
       // Is the value in the accumulator safe? Yes, but scary.
       Variable* variable = expr->target()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, slot);
+      VisitVariableAssignment(variable, expr->op(), slot);
       break;
     }
     case NAMED_PROPERTY:
@@ -1423,9 +2190,16 @@
       builder()->StoreKeyedProperty(object, key, feedback_index(slot),
                                     language_mode());
       break;
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
   }
   execution_result()->SetResultInAccumulator();
 }
@@ -1437,6 +2211,11 @@
 void BytecodeGenerator::VisitThrow(Throw* expr) {
   VisitForAccumulatorValue(expr->exception());
   builder()->Throw();
+  // Throw statements are modeled as expressions instead of statements. These
+  // are converted from assignment statements in the Rewriter::ReWrite pass.
+  // An assignment statement expects a value in the accumulator; setting the
+  // result here is a hack to avoid failing the DCHECK that asserts the
+  // accumulator has been set.
+  execution_result()->SetResultInAccumulator();
 }
 
 
@@ -1449,34 +2228,84 @@
     case NAMED_PROPERTY: {
       builder()->LoadNamedProperty(obj,
                                    expr->key()->AsLiteral()->AsPropertyName(),
-                                   feedback_index(slot), language_mode());
+                                   feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
       VisitForAccumulatorValue(expr->key());
-      builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+      builder()->LoadKeyedProperty(obj, feedback_index(slot));
       break;
     }
     case NAMED_SUPER_PROPERTY:
+      VisitNamedSuperPropertyLoad(expr, Register::invalid_value());
+      break;
     case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+      VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
+      break;
   }
   execution_result()->SetResultInAccumulator();
 }
 
-
 void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
                                                         Property* expr) {
   AccumulatorResultScope result_scope(this);
   VisitPropertyLoad(obj, expr);
 }
 
+void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
+                                                    Register opt_receiver_out) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(3);
 
-void BytecodeGenerator::VisitProperty(Property* expr) {
-  Register obj = VisitForRegisterValue(expr->obj());
-  VisitPropertyLoad(obj, expr);
+  Register receiver, home_object, name;
+  receiver = register_allocator()->NextConsecutiveRegister();
+  home_object = register_allocator()->NextConsecutiveRegister();
+  name = register_allocator()->NextConsecutiveRegister();
+  SuperPropertyReference* super_property =
+      property->obj()->AsSuperPropertyReference();
+  VisitForRegisterValue(super_property->this_var(), receiver);
+  VisitForRegisterValue(super_property->home_object(), home_object);
+  builder()
+      ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+      .StoreAccumulatorInRegister(name);
+  BuildNamedSuperPropertyLoad(receiver, home_object, name);
+
+  if (opt_receiver_out.is_valid()) {
+    builder()->MoveRegister(receiver, opt_receiver_out);
+  }
 }
 
+void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
+                                                    Register opt_receiver_out) {
+  RegisterAllocationScope register_scope(this);
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+
+  Register receiver, home_object, key;
+  receiver = register_allocator()->NextConsecutiveRegister();
+  home_object = register_allocator()->NextConsecutiveRegister();
+  key = register_allocator()->NextConsecutiveRegister();
+  SuperPropertyReference* super_property =
+      property->obj()->AsSuperPropertyReference();
+  VisitForRegisterValue(super_property->this_var(), receiver);
+  VisitForRegisterValue(super_property->home_object(), home_object);
+  VisitForRegisterValue(property->key(), key);
+  BuildKeyedSuperPropertyLoad(receiver, home_object, key);
+
+  if (opt_receiver_out.is_valid()) {
+    builder()->MoveRegister(receiver, opt_receiver_out);
+  }
+}
+
+void BytecodeGenerator::VisitProperty(Property* expr) {
+  LhsKind property_kind = Property::GetAssignType(expr);
+  if (property_kind != NAMED_SUPER_PROPERTY &&
+      property_kind != KEYED_SUPER_PROPERTY) {
+    Register obj = VisitForRegisterValue(expr->obj());
+    VisitPropertyLoad(obj, expr);
+  } else {
+    VisitPropertyLoad(Register::invalid_value(), expr);
+  }
+}
 
 Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
   if (args->length() == 0) {
@@ -1510,18 +2339,21 @@
   return first_arg;
 }
 
-
 void BytecodeGenerator::VisitCall(Call* expr) {
   Expression* callee_expr = expr->expression();
   Call::CallType call_type = expr->GetCallType(isolate());
 
+  if (call_type == Call::SUPER_CALL) {
+    return VisitCallSuper(expr);
+  }
+
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
 
   // The receiver and arguments need to be allocated consecutively for
   // Call(). We allocate the callee and receiver consecutively for calls to
-  // kLoadLookupSlot. Future optimizations could avoid this there are no
-  // arguments or the receiver and arguments are already consecutive.
+  // %LoadLookupSlotForCall. Future optimizations could avoid this if there
+  // are no arguments or if the receiver and arguments are already
+  // consecutive.
   ZoneList<Expression*>* args = expr->arguments();
   register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
   Register callee = register_allocator()->NextConsecutiveRegister();
@@ -1551,18 +2383,16 @@
     case Call::POSSIBLY_EVAL_CALL: {
       if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
         RegisterAllocationScope inner_register_scope(this);
-        register_allocator()->PrepareForConsecutiveAllocations(2);
-        Register context = register_allocator()->NextConsecutiveRegister();
-        Register name = register_allocator()->NextConsecutiveRegister();
+        Register name = register_allocator()->NewRegister();
 
-        // Call LoadLookupSlot to get the callee and receiver.
+        // Call %LoadLookupSlotForCall to get the callee and receiver.
         DCHECK(Register::AreContiguous(callee, receiver));
         Variable* variable = callee_expr->AsVariableProxy()->var();
         builder()
-            ->MoveRegister(Register::function_context(), context)
-            .LoadLiteral(variable->name())
+            ->LoadLiteral(variable->name())
             .StoreAccumulatorInRegister(name)
-            .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+            .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name, 1,
+                                callee);
         break;
       }
       // Fall through.
@@ -1574,10 +2404,21 @@
       builder()->StoreAccumulatorInRegister(callee);
       break;
     }
-    case Call::NAMED_SUPER_PROPERTY_CALL:
-    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::NAMED_SUPER_PROPERTY_CALL: {
+      Property* property = callee_expr->AsProperty();
+      VisitNamedSuperPropertyLoad(property, receiver);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
+    case Call::KEYED_SUPER_PROPERTY_CALL: {
+      Property* property = callee_expr->AsProperty();
+      VisitKeyedSuperPropertyLoad(property, receiver);
+      builder()->StoreAccumulatorInRegister(callee);
+      break;
+    }
     case Call::SUPER_CALL:
-      UNIMPLEMENTED();
+      UNREACHABLE();
+      break;
   }
 
   // Evaluate all arguments to the function call and store in sequential
@@ -1615,12 +2456,39 @@
         .StoreAccumulatorInRegister(callee);
   }
 
-  // TODO(rmcilroy): Use CallIC to allow call type feedback.
-  builder()->Call(callee, receiver, args->length(),
-                  feedback_index(expr->CallFeedbackICSlot()));
+  builder()->SetExpressionPosition(expr);
+  builder()->Call(callee, receiver, 1 + args->length(),
+                  feedback_index(expr->CallFeedbackICSlot()),
+                  expr->tail_call_mode());
   execution_result()->SetResultInAccumulator();
 }
 
+void BytecodeGenerator::VisitCallSuper(Call* expr) {
+  RegisterAllocationScope register_scope(this);
+  SuperCallReference* super = expr->expression()->AsSuperCallReference();
+
+  // Prepare the constructor for the super call.
+  Register this_function = register_allocator()->NewRegister();
+  VisitForAccumulatorValue(super->this_function_var());
+  builder()
+      ->StoreAccumulatorInRegister(this_function)
+      .CallRuntime(Runtime::kInlineGetSuperConstructor, this_function, 1);
+
+  Register constructor = this_function;  // Re-use dead this_function register.
+  builder()->StoreAccumulatorInRegister(constructor);
+
+  ZoneList<Expression*>* args = expr->arguments();
+  Register first_arg = VisitArguments(args);
+
+  // The new target is loaded into the accumulator from the
+  // {new.target} variable.
+  VisitForAccumulatorValue(super->new_target_var());
+
+  // Call construct.
+  builder()->SetExpressionPosition(expr);
+  builder()->New(constructor, first_arg, args->length());
+  execution_result()->SetResultInAccumulator();
+}
 
 void BytecodeGenerator::VisitCallNew(CallNew* expr) {
   Register constructor = register_allocator()->NewRegister();
@@ -1629,27 +2497,31 @@
 
   ZoneList<Expression*>* args = expr->arguments();
   Register first_arg = VisitArguments(args);
-  builder()->New(constructor, first_arg, args->length());
+
+  builder()->SetExpressionPosition(expr);
+  // The accumulator holds the new target, which for CallNew is the same as
+  // the constructor.
+  builder()
+      ->LoadAccumulatorWithRegister(constructor)
+      .New(constructor, first_arg, args->length());
   execution_result()->SetResultInAccumulator();
 }
 
 
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  Register receiver;
   if (expr->is_jsruntime()) {
     // Allocate a register for the receiver and load it with undefined.
-    register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
-    receiver = register_allocator()->NextConsecutiveRegister();
+    register_allocator()->PrepareForConsecutiveAllocations(1 + args->length());
+    Register receiver = register_allocator()->NextConsecutiveRegister();
     builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
-  }
-  // Evaluate all arguments to the runtime call.
-  Register first_arg = VisitArguments(args);
-
-  if (expr->is_jsruntime()) {
-    DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
-    builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+    Register first_arg = VisitArguments(args);
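+    // VisitArguments() uses the consecutive registers prepared above, so
+    // the first argument must directly follow the receiver.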
+    CHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+    builder()->CallJSRuntime(expr->context_index(), receiver,
+                             1 + args->length());
   } else {
+    // Evaluate all arguments to the runtime call.
+    Register first_arg = VisitArguments(args);
     Runtime::FunctionId function_id = expr->function()->function_id;
     builder()->CallRuntime(function_id, first_arg, args->length());
   }
@@ -1755,7 +2627,11 @@
         break;
       }
       case VariableLocation::LOOKUP: {
-        builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
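+        // Runtime::kDeleteLookupSlot expects the variable name in a register.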
+        Register name_reg = register_allocator()->NewRegister();
+        builder()
+            ->LoadLiteral(variable->name())
+            .StoreAccumulatorInRegister(name_reg)
+            .CallRuntime(Runtime::kDeleteLookupSlot, name_reg, 1);
         break;
       }
       default:
@@ -1781,7 +2657,7 @@
   bool is_postfix = expr->is_postfix();
 
   // Evaluate LHS expression and get old value.
-  Register obj, key, old_value;
+  Register object, home_object, key, old_value, value;
   Handle<String> name;
   switch (assign_type) {
     case VARIABLE: {
@@ -1792,26 +2668,53 @@
     }
     case NAMED_PROPERTY: {
       FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-      obj = VisitForRegisterValue(property->obj());
+      object = VisitForRegisterValue(property->obj());
       name = property->key()->AsLiteral()->AsPropertyName();
-      builder()->LoadNamedProperty(obj, name, feedback_index(slot),
-                                   language_mode());
+      builder()->LoadNamedProperty(object, name, feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
       FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
-      obj = VisitForRegisterValue(property->obj());
+      object = VisitForRegisterValue(property->obj());
       // Use visit for accumulator here since we need the key in the accumulator
       // for the LoadKeyedProperty.
       key = register_allocator()->NewRegister();
       VisitForAccumulatorValue(property->key());
       builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
-          obj, feedback_index(slot), language_mode());
+          object, feedback_index(slot));
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      builder()
+          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          .StoreAccumulatorInRegister(key);
+      BuildNamedSuperPropertyLoad(object, home_object, key);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      register_allocator()->PrepareForConsecutiveAllocations(4);
+      object = register_allocator()->NextConsecutiveRegister();
+      home_object = register_allocator()->NextConsecutiveRegister();
+      key = register_allocator()->NextConsecutiveRegister();
+      value = register_allocator()->NextConsecutiveRegister();
+      builder()->StoreAccumulatorInRegister(value);
+      SuperPropertyReference* super_property =
+          property->obj()->AsSuperPropertyReference();
+      VisitForRegisterValue(super_property->this_var(), object);
+      VisitForRegisterValue(super_property->home_object(), home_object);
+      VisitForRegisterValue(property->key(), key);
+      BuildKeyedSuperPropertyLoad(object, home_object, key);
+      break;
+    }
   }
 
   // Convert old value into a number.
@@ -1826,29 +2729,36 @@
   }
 
   // Perform +1/-1 operation.
-  builder()->CountOperation(expr->binary_op(), language_mode_strength());
+  builder()->CountOperation(expr->binary_op());
 
   // Store the value.
   FeedbackVectorSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
       Variable* variable = expr->expression()->AsVariableProxy()->var();
-      VisitVariableAssignment(variable, feedback_slot);
+      VisitVariableAssignment(variable, expr->op(), feedback_slot);
       break;
     }
     case NAMED_PROPERTY: {
-      builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+      builder()->StoreNamedProperty(object, name, feedback_index(feedback_slot),
                                     language_mode());
       break;
     }
     case KEYED_PROPERTY: {
-      builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+      builder()->StoreKeyedProperty(object, key, feedback_index(feedback_slot),
                                     language_mode());
       break;
     }
-    case NAMED_SUPER_PROPERTY:
-    case KEYED_SUPER_PROPERTY:
-      UNIMPLEMENTED();
+    case NAMED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildNamedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      builder()->StoreAccumulatorInRegister(value);
+      BuildKeyedSuperPropertyStore(object, home_object, key, value);
+      break;
+    }
   }
 
   // Restore old value for postfix expressions.
@@ -1881,7 +2791,7 @@
 void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
-  builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+  builder()->CompareOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
 
@@ -1889,7 +2799,7 @@
 void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
-  builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+  builder()->BinaryOperation(expr->op(), lhs);
   execution_result()->SetResultInAccumulator();
 }
 
@@ -1908,13 +2818,15 @@
 
 
 void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
-  UNIMPLEMENTED();
+  // Handled by VisitCall().
+  UNREACHABLE();
 }
 
 
 void BytecodeGenerator::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  UNIMPLEMENTED();
+  builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
+  execution_result()->SetResultInAccumulator();
 }
 
 
@@ -1962,8 +2874,7 @@
 }
 
 
-void BytecodeGenerator::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
@@ -2040,6 +2951,40 @@
   execution_result()->SetResultInAccumulator();
 }
 
+void BytecodeGenerator::VisitNewLocalWithContext() {
+  AccumulatorResultScope accumulator_execution_result(this);
+
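+  // The object to be used as the 'with' extension arrives in the
+  // accumulator; it is passed to Runtime::kPushWithContext together with
+  // the closure.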
+  register_allocator()->PrepareForConsecutiveAllocations(2);
+  Register extension_object = register_allocator()->NextConsecutiveRegister();
+  Register closure = register_allocator()->NextConsecutiveRegister();
+
+  builder()->StoreAccumulatorInRegister(extension_object);
+  VisitFunctionClosureForContext();
+  builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+      Runtime::kPushWithContext, extension_object, 2);
+  execution_result()->SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
+  AccumulatorResultScope accumulator_execution_result(this);
+  DCHECK(variable->IsContextSlot());
+
+  // Allocate a new local catch context.
+  register_allocator()->PrepareForConsecutiveAllocations(3);
+  Register name = register_allocator()->NextConsecutiveRegister();
+  Register exception = register_allocator()->NextConsecutiveRegister();
+  Register closure = register_allocator()->NextConsecutiveRegister();
+
+  builder()
+      ->StoreAccumulatorInRegister(exception)
+      .LoadLiteral(variable->name())
+      .StoreAccumulatorInRegister(name);
+  VisitFunctionClosureForContext();
+  builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+      Runtime::kPushCatchContext, name, 3);
+  execution_result()->SetResultInAccumulator();
+}
+
 
 void BytecodeGenerator::VisitObjectLiteralAccessor(
     Register home_object, ObjectLiteralProperty* property, Register value_out) {
@@ -2053,14 +2998,17 @@
   }
 }
 
-
 void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
                                            ObjectLiteralProperty* property,
                                            int slot_number) {
   Expression* expr = property->value();
-  if (!FunctionLiteral::NeedsHomeObject(expr)) return;
-
-  UNIMPLEMENTED();
+  if (FunctionLiteral::NeedsHomeObject(expr)) {
+    Handle<Name> name = isolate()->factory()->home_object_symbol();
+    FeedbackVectorSlot slot = property->GetSlot(slot_number);
+    builder()
+        ->LoadAccumulatorWithRegister(home_object)
+        .StoreNamedProperty(value, name, feedback_index(slot), language_mode());
+  }
 }
 
 
@@ -2076,19 +3024,26 @@
           ? CreateArgumentsType::kUnmappedArguments
           : CreateArgumentsType::kMappedArguments;
   builder()->CreateArguments(type);
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::ASSIGN,
+                          FeedbackVectorSlot::Invalid());
 }
 
+void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
+  if (rest == nullptr) return;
+
+  // Allocate and initialize a new rest parameter and assign to the {rest}
+  // variable.
+  builder()->CreateArguments(CreateArgumentsType::kRestParameter);
+  DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+  VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+}
 
 void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
   if (variable == nullptr) return;
 
-  // TODO(rmcilroy): Remove once we have tests which exercise this code path.
-  UNIMPLEMENTED();
-
   // Store the closure we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::function_closure());
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
 
@@ -2097,7 +3052,7 @@
 
   // Store the new target we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::new_target());
-  VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+  VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
 }
 
 
@@ -2114,6 +3069,12 @@
                           Context::NATIVE_CONTEXT_INDEX)
         .StoreAccumulatorInRegister(native_context)
         .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+  } else if (closure_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code. Fetch it from the context.
+    builder()->LoadContextSlot(execution_context()->reg(),
+                               Context::CLOSURE_INDEX);
   } else {
     DCHECK(closure_scope->is_function_scope());
     builder()->LoadAccumulatorWithRegister(Register::function_closure());
@@ -2128,6 +3089,13 @@
   Visit(expr);
 }
 
+void BytecodeGenerator::VisitForAccumulatorValueOrTheHole(Expression* expr) {
+  if (expr == nullptr) {
+    builder()->LoadTheHole();
+  } else {
+    VisitForAccumulatorValue(expr);
+  }
+}
 
 // Visits the expression |expr| and discards the result.
 void BytecodeGenerator::VisitForEffect(Expression* expr) {
@@ -2144,22 +3112,19 @@
   return register_scope.ResultRegister();
 }
 
+// Visits the expression |expr| and stores the expression result in
+// |destination|.
+void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
+                                              Register destination) {
+  AccumulatorResultScope register_scope(this);
+  Visit(expr);
+  builder()->StoreAccumulatorInRegister(destination);
+}
 
-Register BytecodeGenerator::NextContextRegister() const {
-  if (execution_context() == nullptr) {
-    // Return the incoming function context for the outermost execution context.
-    return Register::function_context();
-  }
-  Register previous = execution_context()->reg();
-  if (previous == Register::function_context()) {
-    // If the previous context was the incoming function context, then the next
-    // context register is the first local context register.
-    return builder_.first_context_register();
-  } else {
-    // Otherwise use the next local context register.
-    DCHECK_LT(previous.index(), builder_.last_context_register().index());
-    return Register(previous.index() + 1);
-  }
+void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
+  ContextScope context_scope(this, scope);
+  DCHECK(scope->declarations()->is_empty());
+  Visit(stmt);
 }
 
 
@@ -2168,11 +3133,6 @@
 }
 
 
-Strength BytecodeGenerator::language_mode_strength() const {
-  return strength(language_mode());
-}
-
-
 int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
   return info()->feedback_vector()->GetIndex(slot);
 }
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 8bda7be..4ef1738 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -13,6 +13,8 @@
 namespace internal {
 namespace interpreter {
 
+class LoopBuilder;
+
 class BytecodeGenerator final : public AstVisitor {
  public:
   BytecodeGenerator(Isolate* isolate, Zone* zone);
@@ -32,6 +34,9 @@
   class ControlScope;
   class ControlScopeForBreakable;
   class ControlScopeForIteration;
+  class ControlScopeForTopLevel;
+  class ControlScopeForTryCatch;
+  class ControlScopeForTryFinally;
   class ExpressionResultScope;
   class EffectResultScope;
   class AccumulatorResultScope;
@@ -39,7 +44,6 @@
   class RegisterAllocationScope;
 
   void MakeBytecodeBody();
-  Register NextContextRegister() const;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
@@ -61,6 +65,20 @@
   // Helper visitors which perform common operations.
   Register VisitArguments(ZoneList<Expression*>* arguments);
 
+  // Visit a keyed super property load. The optional
+  // |opt_receiver_out| register will have the receiver stored to it
+  // if it's a valid register. The loaded value is placed in the
+  // accumulator.
+  void VisitKeyedSuperPropertyLoad(Property* property,
+                                   Register opt_receiver_out);
+
+  // Visit a named super property load. The optional
+  // |opt_receiver_out| register will have the receiver stored to it
+  // if it's a valid register. The loaded value is placed in the
+  // accumulator.
+  void VisitNamedSuperPropertyLoad(Property* property,
+                                   Register opt_receiver_out);
+
   void VisitPropertyLoad(Register obj, Property* expr);
   void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
 
@@ -72,14 +90,41 @@
   MUST_USE_RESULT Register
   VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
                                     TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
-  void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+  void VisitVariableAssignment(Variable* variable, Token::Value op,
+                               FeedbackVectorSlot slot);
+
+  void BuildNamedSuperPropertyStore(Register receiver, Register home_object,
+                                    Register name, Register value);
+  void BuildKeyedSuperPropertyStore(Register receiver, Register home_object,
+                                    Register key, Register value);
+  void BuildNamedSuperPropertyLoad(Register receiver, Register home_object,
+                                   Register name);
+  void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
+                                   Register key);
+
+  void BuildThrowIfHole(Handle<String> name);
+  void BuildThrowIfNotHole(Handle<String> name);
+  void BuildThrowReassignConstant(Handle<String> name);
+  void BuildThrowReferenceError(Handle<String> name);
+  void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
+  void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
 
   void VisitArgumentsObject(Variable* variable);
+  void VisitRestArgumentsArray(Variable* rest);
+  void VisitCallSuper(Call* call);
+  void VisitClassLiteralContents(ClassLiteral* expr);
+  void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
+  void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+                                   Register prototype);
+  void VisitClassLiteralStaticPrototypeWithComputedName(Register name);
   void VisitThisFunctionVariable(Variable* variable);
   void VisitNewTargetVariable(Variable* variable);
   void VisitNewLocalFunctionContext();
   void VisitBuildLocalActivationContext();
+  void VisitBlockDeclarationsAndStatements(Block* stmt);
   void VisitNewLocalBlockContext(Scope* scope);
+  void VisitNewLocalCatchContext(Variable* variable);
+  void VisitNewLocalWithContext();
   void VisitFunctionClosureForContext();
   void VisitSetHomeObject(Register value, Register home_object,
                           ObjectLiteralProperty* property, int slot_number = 0);
@@ -88,17 +133,34 @@
                                   Register value_out);
   void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
 
+  // Visit the body of a loop iteration.
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
+
+  // Visit a statement and switch scopes; the context is in the accumulator.
+  void VisitInScope(Statement* stmt, Scope* scope);
+
   // Visitors for obtaining expression result in the accumulator, in a
   // register, or just getting the effect.
-  void VisitForAccumulatorValue(Expression* expression);
-  MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
-  void VisitForEffect(Expression* node);
+  void VisitForAccumulatorValue(Expression* expr);
+  void VisitForAccumulatorValueOrTheHole(Expression* expr);
+  MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
+  void VisitForRegisterValue(Expression* expr, Register destination);
+  void VisitForEffect(Expression* expr);
 
   // Methods for tracking and remapping registers.
   void RecordStoreToRegister(Register reg);
   Register LoadFromAliasedRegister(Register reg);
 
-  inline BytecodeArrayBuilder* builder() { return &builder_; }
+  // Methods for tracking try-block nesting.
+  bool IsInsideTryCatch() const { return try_catch_nesting_level_ > 0; }
+  bool IsInsideTryFinally() const { return try_finally_nesting_level_ > 0; }
+
+  // Initialize an array of temporary registers with consecutive registers.
+  template <size_t N>
+  void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
+
+  inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
+  inline BytecodeArrayBuilder* builder() const { return builder_; }
 
   inline Isolate* isolate() const { return isolate_; }
   inline Zone* zone() const { return zone_; }
@@ -130,12 +192,11 @@
 
   ZoneVector<Handle<Object>>* globals() { return &globals_; }
   inline LanguageMode language_mode() const;
-  Strength language_mode_strength() const;
   int feedback_index(FeedbackVectorSlot slot) const;
 
   Isolate* isolate_;
   Zone* zone_;
-  BytecodeArrayBuilder builder_;
+  BytecodeArrayBuilder* builder_;
   CompilationInfo* info_;
   Scope* scope_;
   ZoneVector<Handle<Object>> globals_;
@@ -143,6 +204,8 @@
   ContextScope* execution_context_;
   ExpressionResultScope* execution_result_;
   RegisterAllocationScope* register_allocator_;
+  int try_catch_nesting_level_;
+  int try_finally_nesting_level_;
 };
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-register-allocator.cc b/src/interpreter/bytecode-register-allocator.cc
index 4efb612..0a617c0 100644
--- a/src/interpreter/bytecode-register-allocator.cc
+++ b/src/interpreter/bytecode-register-allocator.cc
@@ -10,17 +10,173 @@
 namespace internal {
 namespace interpreter {
 
+TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
+                                                       int allocation_base)
+    : free_temporaries_(zone),
+      allocation_base_(allocation_base),
+      allocation_count_(0) {}
+
+Register TemporaryRegisterAllocator::first_temporary_register() const {
+  DCHECK(allocation_count() > 0);
+  return Register(allocation_base());
+}
+
+Register TemporaryRegisterAllocator::last_temporary_register() const {
+  DCHECK(allocation_count() > 0);
+  return Register(allocation_base() + allocation_count() - 1);
+}
+
+int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
+  allocation_count_ += 1;
+  return allocation_base() + allocation_count() - 1;
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegister() {
+  if (free_temporaries_.empty()) {
+    return AllocateTemporaryRegister();
+  } else {
+    auto pos = free_temporaries_.begin();
+    int retval = *pos;
+    free_temporaries_.erase(pos);
+    return retval;
+  }
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
+    int start_index, int end_index) {
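+  // With no free temporaries, allocate fresh registers, parking any that
+  // fall inside the excluded range on the free list.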
+  if (free_temporaries_.empty()) {
+    int next_allocation = allocation_base() + allocation_count();
+    while (next_allocation >= start_index && next_allocation <= end_index) {
+      free_temporaries_.insert(AllocateTemporaryRegister());
+      next_allocation += 1;
+    }
+    return AllocateTemporaryRegister();
+  }
+
+  ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
+  if (index == free_temporaries_.begin()) {
+    // If start_index is the first free register, check for a register
+    // greater than end_index.
+    index = free_temporaries_.upper_bound(end_index);
+    if (index == free_temporaries_.end()) {
+      return AllocateTemporaryRegister();
+    }
+  } else {
+    // There is a free register below start_index; step back to use it.
+    index--;
+  }
+
+  int retval = *index;
+  free_temporaries_.erase(index);
+  return retval;
+}
+
+int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
+    size_t count) {
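+  // There is no run to prepare when no registers are requested.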
+  if (count == 0) {
+    return -1;
+  }
+
+  // TODO(oth): replace use of set<> here for free_temporaries with a
+  // more efficient structure. And/or partition into two searches -
+  // one before the translation window and one after.
+
+  // A run will require at least |count| free temporaries.
+  while (free_temporaries_.size() < count) {
+    free_temporaries_.insert(AllocateTemporaryRegister());
+  }
+
+  // Search within existing temporaries for a run.
+  auto start = free_temporaries_.begin();
+  size_t run_length = 0;
+  for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+    int expected = *start + static_cast<int>(run_length);
+    if (*run_end != expected) {
+      start = run_end;
+      run_length = 0;
+    }
+    Register reg_start(*start);
+    Register reg_expected(expected);
+    if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
+        RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
+      // Run straddles the lower edge of the translation window. Registers
+      // after the start of this boundary are displaced by the register
+      // translator to provide a hole for translation. Runs on either side
+      // of the boundary are fine.
+      start = run_end;
+      run_length = 0;
+    }
+    if (++run_length == count) {
+      return *start;
+    }
+  }
+
+  // Continue run if possible across existing last temporary.
+  if (allocation_count_ > 0 && (start == free_temporaries_.end() ||
+                                *start + static_cast<int>(run_length) !=
+                                    last_temporary_register().index() + 1)) {
+    run_length = 0;
+  }
+
+  // Pad temporaries if extended run would cross translation boundary.
+  Register reg_first(*start);
+  Register reg_last(*start + static_cast<int>(count) - 1);
+  DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
+            RegisterTranslator::DistanceToTranslationWindow(reg_last));
+  while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
+         RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
+    auto pos_insert_pair =
+        free_temporaries_.insert(AllocateTemporaryRegister());
+    reg_first = Register(*pos_insert_pair.first);
+    reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
+    run_length = 0;
+  }
+
+  // Ensure enough registers for run.
+  while (run_length++ < count) {
+    free_temporaries_.insert(AllocateTemporaryRegister());
+  }
+
+  int run_start =
+      last_temporary_register().index() - static_cast<int>(count) + 1;
+  DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
+             0 ||
+         RegisterTranslator::DistanceToTranslationWindow(
+             Register(run_start + static_cast<int>(count) - 1)) > 0);
+  return run_start;
+}
+
+bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const {
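+  // A temporary register is live when it has been allocated and is not
+  // currently on the free list.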
+  if (allocation_count_ > 0) {
+    DCHECK(reg >= first_temporary_register() &&
+           reg <= last_temporary_register());
+    return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+  } else {
+    return false;
+  }
+}
+
+void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
+    int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+  free_temporaries_.erase(reg_index);
+}
+
+void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
+  DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+  free_temporaries_.insert(reg_index);
+}
+
 BytecodeRegisterAllocator::BytecodeRegisterAllocator(
-    BytecodeArrayBuilder* builder)
-    : builder_(builder),
-      allocated_(builder->zone()),
+    Zone* zone, TemporaryRegisterAllocator* allocator)
+    : base_allocator_(allocator),
+      allocated_(zone),
       next_consecutive_register_(-1),
       next_consecutive_count_(-1) {}
 
-
 BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
   for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
-    builder_->ReturnTemporaryRegister(*i);
+    base_allocator()->ReturnTemporaryRegister(*i);
   }
   allocated_.clear();
 }
@@ -29,9 +185,9 @@
 Register BytecodeRegisterAllocator::NewRegister() {
   int allocated = -1;
   if (next_consecutive_count_ <= 0) {
-    allocated = builder_->BorrowTemporaryRegister();
+    allocated = base_allocator()->BorrowTemporaryRegister();
   } else {
-    allocated = builder_->BorrowTemporaryRegisterNotInRange(
+    allocated = base_allocator()->BorrowTemporaryRegisterNotInRange(
         next_consecutive_register_,
         next_consecutive_register_ + next_consecutive_count_ - 1);
   }
@@ -52,7 +208,7 @@
 void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
   if (static_cast<int>(count) > next_consecutive_count_) {
     next_consecutive_register_ =
-        builder_->PrepareForConsecutiveTemporaryRegisters(count);
+        base_allocator()->PrepareForConsecutiveTemporaryRegisters(count);
     next_consecutive_count_ = static_cast<int>(count);
   }
 }
@@ -61,7 +217,8 @@
 Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
   DCHECK_GE(next_consecutive_register_, 0);
   DCHECK_GT(next_consecutive_count_, 0);
-  builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+  base_allocator()->BorrowConsecutiveTemporaryRegister(
+      next_consecutive_register_);
   allocated_.push_back(next_consecutive_register_);
   next_consecutive_count_--;
   return Register(next_consecutive_register_++);
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index 74ab3a4..696a3b1 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -5,6 +5,7 @@
 #ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 #define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
 
+#include "src/interpreter/bytecodes.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
@@ -14,26 +15,82 @@
 class BytecodeArrayBuilder;
 class Register;
 
+class TemporaryRegisterAllocator final {
+ public:
+  TemporaryRegisterAllocator(Zone* zone, int start_index);
+
+  // Borrow a temporary register.
+  int BorrowTemporaryRegister();
+
+  // Borrow a temporary register from outside the register range
+  // |start_index| to |end_index|.
+  int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
+
+  // Return a temporary register when no longer used.
+  void ReturnTemporaryRegister(int reg_index);
+
+  // Ensure a run of consecutive registers is available. Each register in
+  // the range should be borrowed with BorrowConsecutiveTemporaryRegister().
+  // Returns the start index of the run.
+  int PrepareForConsecutiveTemporaryRegisters(size_t count);
+
+  // Borrow a register from a range prepared with
+  // PrepareForConsecutiveTemporaryRegisters().
+  void BorrowConsecutiveTemporaryRegister(int reg_index);
+
+  // Returns true if |reg| is a temporary register and is currently
+  // borrowed.
+  bool RegisterIsLive(Register reg) const;
+
+  // Returns the first register in the range of temporary registers.
+  Register first_temporary_register() const;
+
+  // Returns the last register in the range of temporary registers.
+  Register last_temporary_register() const;
+
+  // Returns the start index of temporary register allocations.
+  int allocation_base() const { return allocation_base_; }
+
+  // Returns the number of temporary register allocations made.
+  int allocation_count() const { return allocation_count_; }
+
+ private:
+  // Allocate a temporary register.
+  int AllocateTemporaryRegister();
+
+  ZoneSet<int> free_temporaries_;
+  int allocation_base_;
+  int allocation_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
+};
+
 // A class that allows the instantiator to allocate temporary registers that
 // are cleaned up when the scope is closed.
-class BytecodeRegisterAllocator {
+class BytecodeRegisterAllocator final {
  public:
-  explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+  explicit BytecodeRegisterAllocator(Zone* zone,
+                                     TemporaryRegisterAllocator* allocator);
   ~BytecodeRegisterAllocator();
   Register NewRegister();
 
+  // Ensure |count| consecutive allocations are available.
   void PrepareForConsecutiveAllocations(size_t count);
+
+  // Get the next consecutive allocation after calling
+  // PrepareForConsecutiveAllocations.
   Register NextConsecutiveRegister();
 
+  // Returns true if |reg| is allocated in this allocator.
   bool RegisterIsAllocatedInThisScope(Register reg) const;
 
+  // Returns true if unused consecutive allocations remain.
   bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
 
  private:
-  void* operator new(size_t size);
-  void operator delete(void* p);
+  TemporaryRegisterAllocator* base_allocator() const { return base_allocator_; }
 
-  BytecodeArrayBuilder* builder_;
+  TemporaryRegisterAllocator* base_allocator_;
   ZoneVector<int> allocated_;
   int next_consecutive_register_;
   int next_consecutive_count_;
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index fd778d7..b813605 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -28,6 +28,18 @@
 OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
 #undef DECLARE_OPERAND_SIZE
 
+template <OperandType>
+struct RegisterOperandTraits {
+  static const int kIsRegisterOperand = 0;
+};
+
+#define DECLARE_REGISTER_OPERAND(Name, _)              \
+  template <>                                          \
+  struct RegisterOperandTraits<OperandType::k##Name> { \
+    static const int kIsRegisterOperand = 1;           \
+  };
+REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
+#undef DECLARE_REGISTER_OPERAND
 
 template <OperandType... Args>
 struct BytecodeTraits {};
@@ -63,13 +75,28 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
+           operand_3 == ot;
+  }
+
   static const int kOperandCount = 4;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_2>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_3>::kIsRegisterOperand;
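+  // Bit i of the bitmap is set when operand i is a register operand.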
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+      (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
+      (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
       OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
 };
 
-
 template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
 struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
   static inline OperandType GetOperandType(int i) {
@@ -96,7 +123,20 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot || operand_2 == ot;
+  }
+
   static const int kOperandCount = 3;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_2>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+      (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
       OperandTraits<operand_2>::kSize;
@@ -126,7 +166,18 @@
     return kOperandOffsets[i];
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot || operand_1 == ot;
+  }
+
   static const int kOperandCount = 2;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      RegisterOperandTraits<operand_1>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+      (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
   static const int kSize =
       1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
 };
@@ -148,7 +199,16 @@
     return 1;
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return operand_0 == ot;
+  }
+
   static const int kOperandCount = 1;
+  static const int kRegisterOperandCount =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand;
+  static const int kRegisterOperandBitmap =
+      RegisterOperandTraits<operand_0>::kIsRegisterOperand;
   static const int kSize = 1 + OperandTraits<operand_0>::kSize;
 };
 
@@ -169,7 +229,14 @@
     return 1;
   }
 
+  template <OperandType ot>
+  static inline bool HasAnyOperandsOfType() {
+    return false;
+  }
+
   static const int kOperandCount = 0;
+  static const int kRegisterOperandCount = 0;
+  static const int kRegisterOperandBitmap = 0;
   static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
 };
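
For orientation, the traits above fold each operand list into two compile-time
summaries: a register-operand count and a bitmap whose bit i is set when
operand i is a register. A minimal standalone sketch of the same computation,
using illustrative stand-in types rather than V8's:

  #include <cstdio>

  // Illustrative stand-ins; not V8's OperandType or traits templates.
  enum class OperandType { kNone, kIdx8, kImm8, kReg8, kRegOut8 };

  constexpr int IsRegisterOperand(OperandType t) {
    return (t == OperandType::kReg8 || t == OperandType::kRegOut8) ? 1 : 0;
  }

  template <OperandType o0, OperandType o1, OperandType o2>
  struct TraitsSketch {
    static const int kRegisterOperandCount =
        IsRegisterOperand(o0) + IsRegisterOperand(o1) + IsRegisterOperand(o2);
    // Bit i is set when operand i is a register.
    static const int kRegisterOperandBitmap =
        IsRegisterOperand(o0) + (IsRegisterOperand(o1) << 1) +
        (IsRegisterOperand(o2) << 2);
  };

  int main() {
    // A <reg, idx, idx> operand layout: count 1, bitmap 1 (binary 001).
    typedef TraitsSketch<OperandType::kReg8, OperandType::kIdx8,
                         OperandType::kIdx8> Sample;
    printf("count=%d bitmap=%d\n", Sample::kRegisterOperandCount,
           Sample::kRegisterOperandBitmap);
    return 0;
  }

Adding the shifted 0/1 flags is equivalent to OR-ing them here, since each
flag lands in its own bit position.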
 
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 2d4406c..c3b17c7 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -57,6 +57,7 @@
 
 // static
 uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
   return static_cast<uint8_t>(bytecode);
 }
 
@@ -70,6 +71,21 @@
 
 
 // static
+Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
+  switch (Size(bytecode)) {
+#define CASE(Name, ...)                                  \
+  case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
+    return Bytecode::k##Name;
+    DEBUG_BREAK_BYTECODE_LIST(CASE)
+#undef CASE
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return static_cast<Bytecode>(-1);
+}
+
+// static
 int Bytecodes::Size(Bytecode bytecode) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -100,6 +116,21 @@
 
 
 // static
+int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)                                            \
+  case Bytecode::k##Name:                                          \
+    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+    return Name##Trait::kRegisterOperandCount;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+// static
 OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -130,6 +161,21 @@
 
 
 // static
+int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
+  DCHECK(bytecode <= Bytecode::kLast);
+  switch (bytecode) {
+#define CASE(Name, ...)                                            \
+  case Bytecode::k##Name:                                          \
+    typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+    return Name##Trait::kRegisterOperandBitmap;
+    BYTECODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+// static
 int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
   DCHECK(bytecode <= Bytecode::kLast);
   switch (bytecode) {
@@ -164,6 +210,7 @@
          bytecode == Bytecode::kJumpIfFalse ||
          bytecode == Bytecode::kJumpIfToBooleanTrue ||
          bytecode == Bytecode::kJumpIfToBooleanFalse ||
+         bytecode == Bytecode::kJumpIfNotHole ||
          bytecode == Bytecode::kJumpIfNull ||
          bytecode == Bytecode::kJumpIfUndefined;
 }
@@ -175,6 +222,7 @@
          bytecode == Bytecode::kJumpIfFalseConstant ||
          bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
          bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+         bytecode == Bytecode::kJumpIfNotHoleConstant ||
          bytecode == Bytecode::kJumpIfNullConstant ||
          bytecode == Bytecode::kJumpIfUndefinedConstant;
 }
@@ -186,6 +234,7 @@
          bytecode == Bytecode::kJumpIfFalseConstantWide ||
          bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
          bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+         bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
          bytecode == Bytecode::kJumpIfNullConstantWide ||
          bytecode == Bytecode::kJumpIfUndefinedConstantWide;
 }
@@ -227,10 +276,122 @@
 
 
 // static
+bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
+  return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
+         bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
+         bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+}
+
+// static
+bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
+  switch (bytecode) {
+#define CASE(Name, ...) case Bytecode::k##Name:
+    DEBUG_BREAK_BYTECODE_LIST(CASE);
+#undef CASE
+    return true;
+    default:
+      break;
+  }
+  return false;
+}
+
+// static
 bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
   return bytecode == Bytecode::kReturn || IsJump(bytecode);
 }
 
+// static
+bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
+  return operand_type == OperandType::kIdx8 ||
+         operand_type == OperandType::kIdx16;
+}
+
+// static
+bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
+  return operand_type == OperandType::kImm8;
+}
+
+// static
+bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
+  return (operand_type == OperandType::kRegCount8 ||
+          operand_type == OperandType::kRegCount16);
+}
+
+// static
+bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
+  return (operand_type == OperandType::kMaybeReg8 ||
+          operand_type == OperandType::kMaybeReg16);
+}
+
+// static
+bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+// static
+bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+    REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+// static
+bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
+  switch (operand_type) {
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    return true;
+    REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _)        \
+  case OperandType::k##Name: \
+    break;
+    NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+    REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+  }
+  return false;
+}
+
+namespace {
+static Register DecodeRegister(const uint8_t* operand_start,
+                               OperandType operand_type) {
+  switch (Bytecodes::SizeOfOperand(operand_type)) {
+    case OperandSize::kByte:
+      return Register::FromOperand(*operand_start);
+    case OperandSize::kShort:
+      return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+    case OperandSize::kNone: {
+      UNREACHABLE();
+    }
+  }
+  return Register();
+}
+}  // namespace
+
 
 // static
 std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
@@ -251,16 +412,20 @@
 
   os << bytecode << " ";
 
+  // A debug break overwrites an existing bytecode; its operands belong to
+  // the original instruction, so there is nothing more to decode here.
+  if (IsDebugBreak(bytecode)) return os;
+
   int number_of_operands = NumberOfOperands(bytecode);
+  int range = 0;
   for (int i = 0; i < number_of_operands; i++) {
     OperandType op_type = GetOperandType(bytecode, i);
     const uint8_t* operand_start =
         &bytecode_start[GetOperandOffset(bytecode, i)];
     switch (op_type) {
-      case interpreter::OperandType::kCount8:
+      case interpreter::OperandType::kRegCount8:
         os << "#" << static_cast<unsigned int>(*operand_start);
         break;
-      case interpreter::OperandType::kCount16:
+      case interpreter::OperandType::kRegCount16:
         os << '#' << ReadUnalignedUInt16(operand_start);
         break;
       case interpreter::OperandType::kIdx8:
@@ -272,48 +437,28 @@
       case interpreter::OperandType::kImm8:
         os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
         break;
+      case interpreter::OperandType::kMaybeReg8:
+      case interpreter::OperandType::kMaybeReg16:
       case interpreter::OperandType::kReg8:
-      case interpreter::OperandType::kMaybeReg8: {
-        Register reg = Register::FromOperand(*operand_start);
-        if (reg.is_function_context()) {
-          os << "<context>";
-        } else if (reg.is_function_closure()) {
-          os << "<closure>";
-        } else if (reg.is_new_target()) {
-          os << "<new.target>";
-        } else if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          if (parameter_index == 0) {
-            os << "<this>";
-          } else {
-            os << "a" << parameter_index - 1;
-          }
-        } else {
-          os << "r" << reg.index();
-        }
+      case interpreter::OperandType::kReg16:
+      case interpreter::OperandType::kRegOut8:
+      case interpreter::OperandType::kRegOut16: {
+        Register reg = DecodeRegister(operand_start, op_type);
+        os << reg.ToString(parameter_count);
         break;
       }
-      case interpreter::OperandType::kRegPair8: {
-        Register reg = Register::FromOperand(*operand_start);
-        if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          DCHECK_NE(parameter_index, 0);
-          os << "a" << parameter_index - 1 << "-" << parameter_index;
-        } else {
-          os << "r" << reg.index() << "-" << reg.index() + 1;
-        }
-        break;
-      }
-      case interpreter::OperandType::kReg16: {
-        Register reg =
-            Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
-        if (reg.is_parameter()) {
-          int parameter_index = reg.ToParameterIndex(parameter_count);
-          DCHECK_NE(parameter_index, 0);
-          os << "a" << parameter_index - 1;
-        } else {
-          os << "r" << reg.index();
-        }
+      case interpreter::OperandType::kRegOutTriple8:
+      case interpreter::OperandType::kRegOutTriple16:
+        range += 1;
+        // Fall through to the register-pair cases below.
+      case interpreter::OperandType::kRegOutPair8:
+      case interpreter::OperandType::kRegOutPair16:
+      case interpreter::OperandType::kRegPair8:
+      case interpreter::OperandType::kRegPair16: {
+        range += 1;
+        Register first_reg = DecodeRegister(operand_start, op_type);
+        Register last_reg = Register(first_reg.index() + range);
+        os << first_reg.ToString(parameter_count) << "-"
+           << last_reg.ToString(parameter_count);
         break;
       }
       case interpreter::OperandType::kNone:
@@ -327,7 +472,6 @@
   return os;
 }
 
-
 std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
   return os << Bytecodes::ToString(bytecode);
 }
@@ -342,22 +486,33 @@
   return os << Bytecodes::OperandSizeToString(operand_size);
 }
 
-
 static const int kLastParamRegisterIndex =
     -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
 static const int kFunctionClosureRegisterIndex =
     -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
-static const int kFunctionContextRegisterIndex =
+static const int kCurrentContextRegisterIndex =
     -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
 static const int kNewTargetRegisterIndex =
     -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
 
+// The register space is a signed 16-bit space. Register operands
+// occupy the range above 0. Parameter indices are biased with the
+// negative value kLastParamRegisterIndex for ease of access in the
+// interpreter.
+static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
+static const int kMaxRegisterIndex = -kMinInt16;
+static const int kMaxReg8Index = -kMinInt8;
+static const int kMinReg8Index = -kMaxInt8;
+static const int kMaxReg16Index = -kMinInt16;
+static const int kMinReg16Index = -kMaxInt16;
 
-// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
-// Parameter indices are biased with the negative value kLastParamRegisterIndex
-// for ease of access in the interpreter.
-static const int kMaxParameterIndex = 128 + kLastParamRegisterIndex;
+bool Register::is_byte_operand() const {
+  return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+}
 
+bool Register::is_short_operand() const {
+  return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+}
 
 Register Register::FromParameterIndex(int index, int parameter_count) {
   DCHECK_GE(index, 0);
@@ -365,7 +520,6 @@
   DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
   int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
   DCHECK_LT(register_index, 0);
-  DCHECK_GE(register_index, kMinInt8);
   return Register(register_index);
 }
 
@@ -386,13 +540,13 @@
 }
 
 
-Register Register::function_context() {
-  return Register(kFunctionContextRegisterIndex);
+Register Register::current_context() {
+  return Register(kCurrentContextRegisterIndex);
 }
 
 
-bool Register::is_function_context() const {
-  return index() == kFunctionContextRegisterIndex;
+bool Register::is_current_context() const {
+  return index() == kCurrentContextRegisterIndex;
 }
 
 
@@ -403,13 +557,14 @@
   return index() == kNewTargetRegisterIndex;
 }
 
-
 int Register::MaxParameterIndex() { return kMaxParameterIndex; }
 
+int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
+
+int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
 
 uint8_t Register::ToOperand() const {
-  DCHECK_GE(index_, kMinInt8);
-  DCHECK_LE(index_, kMaxInt8);
+  DCHECK(is_byte_operand());
   return static_cast<uint8_t>(-index_);
 }
 
@@ -420,8 +575,7 @@
 
 
 uint16_t Register::ToWideOperand() const {
-  DCHECK_GE(index_, kMinInt16);
-  DCHECK_LE(index_, kMaxInt16);
+  DCHECK(is_short_operand());
   return static_cast<uint16_t>(-index_);
 }
 
@@ -431,6 +585,16 @@
 }
 
 
+uint32_t Register::ToRawOperand() const {
+  return static_cast<uint32_t>(-index_);
+}
+
+
+Register Register::FromRawOperand(uint32_t operand) {
+  return Register(-static_cast<int32_t>(operand));
+}
+
+
 bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
                              Register reg4, Register reg5) {
   if (reg1.index() + 1 != reg2.index()) {
@@ -448,6 +612,29 @@
   return true;
 }
 
+std::string Register::ToString(int parameter_count) {
+  if (is_current_context()) {
+    return std::string("<context>");
+  } else if (is_function_closure()) {
+    return std::string("<closure>");
+  } else if (is_new_target()) {
+    return std::string("<new.target>");
+  } else if (is_parameter()) {
+    int parameter_index = ToParameterIndex(parameter_count);
+    if (parameter_index == 0) {
+      return std::string("<this>");
+    } else {
+      std::ostringstream s;
+      s << "a" << parameter_index - 1;
+      return s.str();
+    }
+  } else {
+    std::ostringstream s;
+    s << "r" << index();
+    return s.str();
+  }
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
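
An aside on the encoding that is_byte_operand(), ToOperand() and
ToWideOperand() above rely on: register indices are stored negated in the
operand, so a round trip preserves any index whose magnitude fits the signed
width. A hedged standalone sketch (the decode direction mirrors FromOperand
but is written here from scratch, not copied from V8):

  #include <cstdint>
  #include <cstdio>
  #include <initializer_list>

  uint8_t ToOperand(int index) { return static_cast<uint8_t>(-index); }
  int FromOperand(uint8_t operand) { return -static_cast<int8_t>(operand); }

  int main() {
    // Parameters have negative indices, locals non-negative; both survive
    // the round trip while the index stays within the signed 8-bit range.
    for (int index : {-3, -1, 0, 1, 42}) {
      uint8_t op = ToOperand(index);
      printf("index %3d -> operand 0x%02x -> index %3d\n", index, op,
             FromOperand(op));
    }
    return 0;
  }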
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index a9beb6c..d4863b1 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -15,24 +15,65 @@
 namespace internal {
 namespace interpreter {
 
+#define INVALID_OPERAND_TYPE_LIST(V) \
+  V(None, OperandSize::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */                      \
+  V(MaybeReg8, OperandSize::kByte)          \
+  V(Reg8, OperandSize::kByte)               \
+  V(RegPair8, OperandSize::kByte)           \
+  /* Short operands. */                     \
+  V(MaybeReg16, OperandSize::kShort)        \
+  V(Reg16, OperandSize::kShort)             \
+  V(RegPair16, OperandSize::kShort)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */                       \
+  V(RegOut8, OperandSize::kByte)             \
+  V(RegOutPair8, OperandSize::kByte)         \
+  V(RegOutTriple8, OperandSize::kByte)       \
+  /* Short operands. */                      \
+  V(RegOut16, OperandSize::kShort)           \
+  V(RegOutPair16, OperandSize::kShort)       \
+  V(RegOutTriple16, OperandSize::kShort)
+
+#define SCALAR_OPERAND_TYPE_LIST(V) \
+  /* Byte operands. */              \
+  V(Idx8, OperandSize::kByte)       \
+  V(Imm8, OperandSize::kByte)       \
+  V(RegCount8, OperandSize::kByte)  \
+  /* Short operands. */             \
+  V(Idx16, OperandSize::kShort)     \
+  V(RegCount16, OperandSize::kShort)
+
+#define REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+  REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
+
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  INVALID_OPERAND_TYPE_LIST(V)            \
+  SCALAR_OPERAND_TYPE_LIST(V)
+
 // The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V)       \
-                                   \
-  /* None operand. */              \
-  V(None, OperandSize::kNone)      \
-                                   \
-  /* Byte operands. */             \
-  V(Count8, OperandSize::kByte)    \
-  V(Imm8, OperandSize::kByte)      \
-  V(Idx8, OperandSize::kByte)      \
-  V(MaybeReg8, OperandSize::kByte) \
-  V(Reg8, OperandSize::kByte)      \
-  V(RegPair8, OperandSize::kByte)  \
-                                   \
-  /* Short operands. */            \
-  V(Count16, OperandSize::kShort)  \
-  V(Idx16, OperandSize::kShort)    \
-  V(Reg16, OperandSize::kShort)
+#define OPERAND_TYPE_LIST(V)        \
+  NON_REGISTER_OPERAND_TYPE_LIST(V) \
+  REGISTER_OPERAND_TYPE_LIST(V)
+
+// Define one debug break bytecode for each possible bytecode size, so that
+// any bytecode can be overwritten in place by a debug break of equal size.
+#define DEBUG_BREAK_BYTECODE_LIST(V)                                           \
+  V(DebugBreak0, OperandType::kNone)                                           \
+  V(DebugBreak1, OperandType::kReg8)                                           \
+  V(DebugBreak2, OperandType::kReg16)                                          \
+  V(DebugBreak3, OperandType::kReg16, OperandType::kReg8)                      \
+  V(DebugBreak4, OperandType::kReg16, OperandType::kReg16)                     \
+  V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
+  V(DebugBreak6, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16)                                                       \
+  V(DebugBreak7, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16, OperandType::kReg8)                                   \
+  V(DebugBreak8, OperandType::kReg16, OperandType::kReg16,                     \
+    OperandType::kReg16, OperandType::kReg16)
 
 // The list of bytecodes which are interpreted by the interpreter.
 #define BYTECODE_LIST(V)                                                       \
@@ -49,14 +90,10 @@
   V(LdaConstantWide, OperandType::kIdx16)                                      \
                                                                                \
   /* Globals */                                                                \
-  V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
-  V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8)       \
-  V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8)       \
-  V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16)             \
-  V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
-  V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+  V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8)                         \
+  V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8)             \
+  V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16)                   \
+  V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16)       \
   V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8)                   \
   V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8)                   \
   V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16)             \
@@ -82,25 +119,17 @@
                                                                                \
   /* Register-accumulator transfers */                                         \
   V(Ldar, OperandType::kReg8)                                                  \
-  V(Star, OperandType::kReg8)                                                  \
+  V(Star, OperandType::kRegOut8)                                               \
                                                                                \
   /* Register-register transfers */                                            \
-  V(Mov, OperandType::kReg8, OperandType::kReg8)                               \
-  V(Exchange, OperandType::kReg8, OperandType::kReg16)                         \
-  V(ExchangeWide, OperandType::kReg16, OperandType::kReg16)                    \
+  V(Mov, OperandType::kReg8, OperandType::kRegOut8)                            \
+  V(MovWide, OperandType::kReg16, OperandType::kRegOut16)                      \
                                                                                \
   /* LoadIC operations */                                                      \
-  V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
-  V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)  \
-  V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8)                 \
-  V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8)                 \
-  /* TODO(rmcilroy): Wide register operands too? */                            \
-  V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                 \
-    OperandType::kIdx16)                                                       \
-  V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16,                 \
-    OperandType::kIdx16)                                                       \
-  V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16)            \
-  V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16)            \
+  V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8)        \
+  V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8)                       \
+  V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16)  \
+  V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16)                  \
                                                                                \
   /* StoreIC operations */                                                     \
   V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
@@ -109,7 +138,6 @@
     OperandType::kIdx8)                                                        \
   V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8,                \
     OperandType::kIdx8)                                                        \
-  /* TODO(rmcilroy): Wide register operands too? */                            \
   V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16,                \
     OperandType::kIdx16)                                                       \
   V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16,                \
@@ -139,22 +167,33 @@
   V(TypeOf, OperandType::kNone)                                                \
   V(DeletePropertyStrict, OperandType::kReg8)                                  \
   V(DeletePropertySloppy, OperandType::kReg8)                                  \
-  V(DeleteLookupSlot, OperandType::kNone)                                      \
                                                                                \
   /* Call operations */                                                        \
-  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8,        \
+  V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8,     \
     OperandType::kIdx8)                                                        \
-  V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16,   \
-    OperandType::kIdx16)                                                       \
+  V(CallWide, OperandType::kReg16, OperandType::kReg16,                        \
+    OperandType::kRegCount16, OperandType::kIdx16)                             \
+  V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
+    OperandType::kIdx8)                                                        \
+  V(TailCallWide, OperandType::kReg16, OperandType::kReg16,                    \
+    OperandType::kRegCount16, OperandType::kIdx16)                             \
   V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8,                 \
-    OperandType::kCount8)                                                      \
+    OperandType::kRegCount8)                                                   \
+  V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16,            \
+    OperandType::kRegCount8)                                                   \
   V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8,          \
-    OperandType::kCount8, OperandType::kRegPair8)                              \
+    OperandType::kRegCount8, OperandType::kRegOutPair8)                        \
+  V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16,     \
+    OperandType::kRegCount8, OperandType::kRegOutPair16)                       \
   V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8,                    \
-    OperandType::kCount8)                                                      \
+    OperandType::kRegCount8)                                                   \
+  V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16,               \
+    OperandType::kRegCount16)                                                  \
                                                                                \
   /* New operator */                                                           \
-  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8)    \
+  V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
+  V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16,                    \
+    OperandType::kRegCount16)                                                  \
                                                                                \
   /* Test Operators */                                                         \
   V(TestEqual, OperandType::kReg8)                                             \
@@ -194,6 +233,7 @@
   /* Arguments allocation */                                                   \
   V(CreateMappedArguments, OperandType::kNone)                                 \
   V(CreateUnmappedArguments, OperandType::kNone)                               \
+  V(CreateRestParameter, OperandType::kNone)                                   \
                                                                                \
   /* Control Flow */                                                           \
   V(Jump, OperandType::kImm8)                                                  \
@@ -217,18 +257,30 @@
   V(JumpIfUndefined, OperandType::kImm8)                                       \
   V(JumpIfUndefinedConstant, OperandType::kIdx8)                               \
   V(JumpIfUndefinedConstantWide, OperandType::kIdx16)                          \
+  V(JumpIfNotHole, OperandType::kImm8)                                         \
+  V(JumpIfNotHoleConstant, OperandType::kIdx8)                                 \
+  V(JumpIfNotHoleConstantWide, OperandType::kIdx16)                            \
                                                                                \
   /* Complex flow control For..in */                                           \
-  V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8)  \
+  V(ForInPrepare, OperandType::kRegOutTriple8)                                 \
+  V(ForInPrepareWide, OperandType::kRegOutTriple16)                            \
   V(ForInDone, OperandType::kReg8, OperandType::kReg8)                         \
-  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8,     \
-    OperandType::kReg8)                                                        \
+  V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
+  V(ForInNextWide, OperandType::kReg16, OperandType::kReg16,                   \
+    OperandType::kRegPair16)                                                   \
   V(ForInStep, OperandType::kReg8)                                             \
                                                                                \
+  /* Perform a stack guard check */                                            \
+  V(StackCheck, OperandType::kNone)                                            \
+                                                                               \
   /* Non-local flow control */                                                 \
   V(Throw, OperandType::kNone)                                                 \
-  V(Return, OperandType::kNone)
-
+  V(ReThrow, OperandType::kNone)                                               \
+  V(Return, OperandType::kNone)                                                \
+                                                                               \
+  /* Debugger */                                                               \
+  V(Debugger, OperandType::kNone)                                              \
+  DEBUG_BREAK_BYTECODE_LIST(V)
 
 // Enumeration of the size classes of operand types used by bytecodes.
 enum class OperandSize : uint8_t {
@@ -268,28 +320,30 @@
 // in its stack-frame. Registers hold parameters, this, and expression values.
 class Register {
  public:
-  Register() : index_(kIllegalIndex) {}
+  explicit Register(int index = kInvalidIndex) : index_(index) {}
 
-  explicit Register(int index) : index_(index) {}
-
-  int index() const {
-    DCHECK(index_ != kIllegalIndex);
-    return index_;
-  }
+  int index() const { return index_; }
   bool is_parameter() const { return index() < 0; }
-  bool is_valid() const { return index_ != kIllegalIndex; }
+  bool is_valid() const { return index_ != kInvalidIndex; }
+  bool is_byte_operand() const;
+  bool is_short_operand() const;
 
   static Register FromParameterIndex(int index, int parameter_count);
   int ToParameterIndex(int parameter_count) const;
   static int MaxParameterIndex();
+  static int MaxRegisterIndex();
+  static int MaxRegisterIndexForByteOperand();
+
+  // Returns an invalid register.
+  static Register invalid_value() { return Register(); }
 
   // Returns the register for the function's closure object.
   static Register function_closure();
   bool is_function_closure() const;
 
-  // Returns the register for the function's outer context.
-  static Register function_context();
-  bool is_function_context() const;
+  // Returns the register which holds the current context object.
+  static Register current_context();
+  bool is_current_context() const;
 
   // Returns the register for the incoming new target value.
   static Register new_target();
@@ -301,11 +355,16 @@
   static Register FromWideOperand(uint16_t operand);
   uint16_t ToWideOperand() const;
 
+  static Register FromRawOperand(uint32_t raw_operand);
+  uint32_t ToRawOperand() const;
+
   static bool AreContiguous(Register reg1, Register reg2,
                             Register reg3 = Register(),
                             Register reg4 = Register(),
                             Register reg5 = Register());
 
+  std::string ToString(int parameter_count);
+
   bool operator==(const Register& other) const {
     return index() == other.index();
   }
@@ -318,9 +377,15 @@
   bool operator<=(const Register& other) const {
     return index() <= other.index();
   }
+  bool operator>(const Register& other) const {
+    return index() > other.index();
+  }
+  bool operator>=(const Register& other) const {
+    return index() >= other.index();
+  }
 
  private:
-  static const int kIllegalIndex = kMaxInt;
+  static const int kInvalidIndex = kMaxInt;
 
   void* operator new(size_t size);
   void operator delete(void* p);
@@ -349,57 +414,96 @@
   // Returns the number of operands expected by |bytecode|.
   static int NumberOfOperands(Bytecode bytecode);
 
-  // Return the i-th operand of |bytecode|.
+  // Returns the number of register operands expected by |bytecode|.
+  static int NumberOfRegisterOperands(Bytecode bytecode);
+
+  // Returns the i-th operand of |bytecode|.
   static OperandType GetOperandType(Bytecode bytecode, int i);
 
-  // Return the size of the i-th operand of |bytecode|.
+  // Returns the size of the i-th operand of |bytecode|.
   static OperandSize GetOperandSize(Bytecode bytecode, int i);
 
   // Returns the offset of the i-th operand of |bytecode| relative to the start
   // of the bytecode.
   static int GetOperandOffset(Bytecode bytecode, int i);
 
+  // Returns a zero-based bitmap of the register operand positions of
+  // |bytecode|.
+  static int GetRegisterOperandBitmap(Bytecode bytecode);
+
+  // Returns a debug break bytecode of the same overall size as |bytecode|,
+  // so that the original bytecode can be overwritten in place.
+  static Bytecode GetDebugBreak(Bytecode bytecode);
+
   // Returns the size of the bytecode including its operands.
   static int Size(Bytecode bytecode);
 
   // Returns the size of |operand|.
   static OperandSize SizeOfOperand(OperandType operand);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // an immediate byte operand (OperandType::kImm8).
   static bool IsConditionalJumpImmediate(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // a constant pool entry (OperandType::kIdx8).
   static bool IsConditionalJumpConstant(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // a constant pool entry (OperandType::kIdx16).
   static bool IsConditionalJumpConstantWide(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump taking
+  // Returns true if the bytecode is a conditional jump taking
   // any kind of operand.
   static bool IsConditionalJump(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or a conditional jump taking
+  // Returns true if the bytecode is a jump or a conditional jump taking
   // an immediate byte operand (OperandType::kImm8).
   static bool IsJumpImmediate(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking a
+  // Returns true if the bytecode is a jump or conditional jump taking a
   // constant pool entry (OperandType::kIdx8).
   static bool IsJumpConstant(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking a
+  // Returns true if the bytecode is a jump or conditional jump taking a
   // constant pool entry (OperandType::kIdx16).
   static bool IsJumpConstantWide(Bytecode bytecode);
 
-  // Return true if the bytecode is a jump or conditional jump taking
+  // Returns true if the bytecode is a jump or conditional jump taking
   // any kind of operand.
   static bool IsJump(Bytecode bytecode);
 
-  // Return true if the bytecode is a conditional jump, a jump, or a return.
+  // Returns true if the bytecode is a conditional jump, a jump, or a return.
   static bool IsJumpOrReturn(Bytecode bytecode);
 
+  // Returns true if the bytecode is a call or a constructor call.
+  static bool IsCallOrNew(Bytecode bytecode);
+
+  // Returns true if the bytecode is a debug break.
+  static bool IsDebugBreak(Bytecode bytecode);
+
+  // Returns true if |operand_type| is an index operand (kIdx8/kIdx16).
+  static bool IsIndexOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents an immediate.
+  static bool IsImmediateOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is a register count operand
+  // (kRegCount8/kRegCount16).
+  static bool IsRegisterCountOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is any type of register operand.
+  static bool IsRegisterOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents a register used as an input.
+  static bool IsRegisterInputOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| represents a register used as an output.
+  static bool IsRegisterOutputOperandType(OperandType operand_type);
+
+  // Returns true if |operand_type| is a maybe register operand
+  // (kMaybeReg8/kMaybeReg16).
+  static bool IsMaybeRegisterOperandType(OperandType operand_type);
+
   // Decode a single bytecode and operands to |os|.
   static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
                               int number_of_parameters);
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index 2586e1f..e8b1281 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -85,19 +85,19 @@
   }
 }
 
-
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
-  Handle<FixedArray> fixed_array =
-      factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
+  Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
+      static_cast<int>(size()), PretenureFlag::TENURED);
   for (int i = 0; i < fixed_array->length(); i++) {
     fixed_array->set(i, *At(static_cast<size_t>(i)));
   }
+  constants_map()->Clear();
   return fixed_array;
 }
 
 
 size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
-  index_t* entry = constants_map_.Find(object);
+  index_t* entry = constants_map()->Find(object);
   return (entry == nullptr) ? AllocateEntry(object) : *entry;
 }
 
@@ -106,7 +106,7 @@
     Handle<Object> object) {
   DCHECK(!object->IsOddball());
   size_t index;
-  index_t* entry = constants_map_.Get(object);
+  index_t* entry = constants_map()->Get(object);
   if (idx8_slice_.available() > 0) {
     index = idx8_slice_.Allocate(object);
   } else {
@@ -136,7 +136,7 @@
                                                  Handle<Object> object) {
   DiscardReservedEntry(operand_size);
   size_t index;
-  index_t* entry = constants_map_.Find(object);
+  index_t* entry = constants_map()->Find(object);
   if (nullptr == entry) {
     index = AllocateEntry(object);
   } else {
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index c882b1d..d7e41e3 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -12,13 +12,15 @@
 namespace v8 {
 namespace internal {
 
-class Factory;
 class Isolate;
 
 namespace interpreter {
 
-// A helper class for constructing constant arrays for the interpreter.
-class ConstantArrayBuilder final : public ZoneObject {
+// A helper class for constructing constant arrays for the
+// interpreter. Each instance of this class is intended to be used to
+// generate exactly one FixedArray of constants via the ToFixedArray
+// method.
+class ConstantArrayBuilder final BASE_EMBEDDED {
  public:
   // Capacity of the 8-bit operand slice.
   static const size_t kLowCapacity = 1u << kBitsPerByte;
@@ -32,7 +34,7 @@
   ConstantArrayBuilder(Isolate* isolate, Zone* zone);
 
   // Generate a fixed array of constants based on inserted objects.
-  Handle<FixedArray> ToFixedArray(Factory* factory) const;
+  Handle<FixedArray> ToFixedArray();
 
   // Returns the object in the constant pool array that at index
   // |index|.
@@ -84,6 +86,8 @@
     DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
   };
 
+  IdentityMap<index_t>* constants_map() { return &constants_map_; }
+
   Isolate* isolate_;
   ConstantArraySlice idx8_slice_;
   ConstantArraySlice idx16_slice_;
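
The constants_map() accessor above backs the dedup behaviour of Insert(): a
constant already present reuses its existing slot instead of growing the
array. A standalone sketch of that insert-or-reuse pattern, with std::map and
std::string standing in for V8's IdentityMap and Handle<Object>:

  #include <cstdio>
  #include <map>
  #include <string>
  #include <vector>

  size_t Insert(const std::string& value, std::map<std::string, size_t>* map,
                std::vector<std::string>* pool) {
    auto it = map->find(value);
    if (it != map->end()) return it->second;  // Constant already pooled.
    size_t index = pool->size();
    pool->push_back(value);
    (*map)[value] = index;
    return index;
  }

  int main() {
    std::map<std::string, size_t> map;
    std::vector<std::string> pool;
    size_t a = Insert("a", &map, &pool);
    size_t b = Insert("b", &map, &pool);
    size_t a_again = Insert("a", &map, &pool);
    printf("%zu %zu %zu\n", a, b, a_again);  // Prints: 0 1 0
    return 0;
  }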
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 99066e8..6510aa4 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -137,6 +137,57 @@
   builder()->Bind(&site);
 }
 
+
+void TryCatchBuilder::BeginTry(Register context) {
+  builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryCatchBuilder::EndTry() {
+  builder()->MarkTryEnd(handler_id_);
+  builder()->Jump(&exit_);
+  builder()->Bind(&handler_);
+  builder()->MarkHandler(handler_id_, true);
+}
+
+
+void TryCatchBuilder::EndCatch() { builder()->Bind(&exit_); }
+
+
+void TryFinallyBuilder::BeginTry(Register context) {
+  builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryFinallyBuilder::LeaveTry() {
+  finalization_sites_.push_back(BytecodeLabel());
+  builder()->Jump(&finalization_sites_.back());
+}
+
+
+void TryFinallyBuilder::EndTry() {
+  builder()->MarkTryEnd(handler_id_);
+}
+
+
+void TryFinallyBuilder::BeginHandler() {
+  builder()->Bind(&handler_);
+  builder()->MarkHandler(handler_id_, will_catch_);
+}
+
+
+void TryFinallyBuilder::BeginFinally() {
+  for (size_t i = 0; i < finalization_sites_.size(); i++) {
+    BytecodeLabel& site = finalization_sites_.at(i);
+    builder()->Bind(&site);
+  }
+}
+
+
+void TryFinallyBuilder::EndFinally() {
+  // Nothing to be done here.
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
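
The finalization-site machinery in TryFinallyBuilder records one unbound
label per early exit out of the try region and binds them all when the
finally block begins, so every exit path funnels through the finalizer. A toy
model of that patch-the-jumps idea (plain offsets stand in for bytecode
labels):

  #include <cstdio>
  #include <vector>

  struct Label { int target = -1; };  // -1 means not yet bound.

  int main() {
    std::vector<Label> finalization_sites;
    finalization_sites.push_back(Label());  // LeaveTry() for a return.
    finalization_sites.push_back(Label());  // LeaveTry() for a break.
    int finally_offset = 100;               // BeginFinally() binds them all.
    for (Label& site : finalization_sites) site.target = finally_offset;
    for (const Label& site : finalization_sites)
      printf("jump patched to offset %d\n", site.target);
    return 0;
  }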
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 24a7dfe..e4d376b 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -144,6 +144,53 @@
   ZoneVector<BytecodeLabel> case_sites_;
 };
 
+
+// A class to help with co-ordinating control flow in try-catch statements.
+class TryCatchBuilder final : public ControlFlowBuilder {
+ public:
+  explicit TryCatchBuilder(BytecodeArrayBuilder* builder)
+      : ControlFlowBuilder(builder), handler_id_(builder->NewHandlerEntry()) {}
+
+  void BeginTry(Register context);
+  void EndTry();
+  void EndCatch();
+
+ private:
+  int handler_id_;
+  BytecodeLabel handler_;
+  BytecodeLabel exit_;
+};
+
+
+// A class to help with co-ordinating control flow in try-finally statements.
+class TryFinallyBuilder final : public ControlFlowBuilder {
+ public:
+  explicit TryFinallyBuilder(BytecodeArrayBuilder* builder, bool will_catch)
+      : ControlFlowBuilder(builder),
+        handler_id_(builder->NewHandlerEntry()),
+        finalization_sites_(builder->zone()),
+        will_catch_(will_catch) {}
+
+  void BeginTry(Register context);
+  void LeaveTry();
+  void EndTry();
+  void BeginHandler();
+  void BeginFinally();
+  void EndFinally();
+
+ private:
+  int handler_id_;
+  BytecodeLabel handler_;
+
+  // Unbound labels that identify jumps to the finally block in the code.
+  ZoneVector<BytecodeLabel> finalization_sites_;
+
+  // Conservative prediction of whether exceptions thrown into the handler for
+  // this finally block will be caught. Note that such a prediction depends on
+  // whether this try-finally is nested inside a surrounding try-catch.
+  bool will_catch_;
+};
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/handler-table-builder.cc b/src/interpreter/handler-table-builder.cc
new file mode 100644
index 0000000..374089b
--- /dev/null
+++ b/src/interpreter/handler-table-builder.cc
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/handler-table-builder.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+HandlerTableBuilder::HandlerTableBuilder(Isolate* isolate, Zone* zone)
+    : isolate_(isolate), entries_(zone) {}
+
+Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
+  int handler_table_size = static_cast<int>(entries_.size());
+  Handle<HandlerTable> table =
+      Handle<HandlerTable>::cast(isolate_->factory()->NewFixedArray(
+          HandlerTable::LengthForRange(handler_table_size), TENURED));
+  for (int i = 0; i < handler_table_size; ++i) {
+    Entry& entry = entries_[i];
+    HandlerTable::CatchPrediction pred =
+        entry.will_catch ? HandlerTable::CAUGHT : HandlerTable::UNCAUGHT;
+    table->SetRangeStart(i, static_cast<int>(entry.offset_start));
+    table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
+    table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
+    table->SetRangeData(i, entry.context.index());
+  }
+  return table;
+}
+
+
+int HandlerTableBuilder::NewHandlerEntry() {
+  int handler_id = static_cast<int>(entries_.size());
+  Entry entry = {0, 0, 0, Register(), false};
+  entries_.push_back(entry);
+  return handler_id;
+}
+
+
+void HandlerTableBuilder::SetTryRegionStart(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_start = offset;
+}
+
+
+void HandlerTableBuilder::SetTryRegionEnd(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_end = offset;
+}
+
+
+void HandlerTableBuilder::SetHandlerTarget(int handler_id, size_t offset) {
+  DCHECK(Smi::IsValid(offset));  // Encoding of handler table requires this.
+  entries_[handler_id].offset_target = offset;
+}
+
+
+void HandlerTableBuilder::SetPrediction(int handler_id, bool will_catch) {
+  entries_[handler_id].will_catch = will_catch;
+}
+
+
+void HandlerTableBuilder::SetContextRegister(int handler_id, Register reg) {
+  entries_[handler_id].context = reg;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
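
Putting the setters above together: an entry is created once via
NewHandlerEntry() and then filled in field by field before ToHandlerTable()
copies everything to the heap. A hedged standalone model of that bookkeeping
(this Entry is a simplified stand-in, not the builder's type):

  #include <cstdio>
  #include <vector>

  struct Entry {
    size_t offset_start = 0;   // Set via SetTryRegionStart().
    size_t offset_end = 0;     // Set via SetTryRegionEnd().
    size_t offset_target = 0;  // Set via SetHandlerTarget().
    bool will_catch = false;   // Set via SetPrediction().
  };

  int main() {
    std::vector<Entry> entries;
    entries.push_back(Entry());  // NewHandlerEntry() -> handler_id 0.
    int handler_id = 0;
    entries[handler_id].offset_start = 4;
    entries[handler_id].offset_end = 20;
    entries[handler_id].offset_target = 24;
    entries[handler_id].will_catch = true;
    const Entry& e = entries[handler_id];
    printf("try [%zu,%zu) -> handler at %zu (%s)\n", e.offset_start,
           e.offset_end, e.offset_target,
           e.will_catch ? "CAUGHT" : "UNCAUGHT");
    return 0;
  }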
diff --git a/src/interpreter/handler-table-builder.h b/src/interpreter/handler-table-builder.h
new file mode 100644
index 0000000..7356e37
--- /dev/null
+++ b/src/interpreter/handler-table-builder.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+
+#include "src/handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class HandlerTable;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing exception handler tables for the interpreter.
+class HandlerTableBuilder final BASE_EMBEDDED {
+ public:
+  HandlerTableBuilder(Isolate* isolate, Zone* zone);
+
+  // Builds the actual handler table by copying the current values into a heap
+  // object. Any further mutations to the builder won't be reflected.
+  Handle<HandlerTable> ToHandlerTable();
+
+  // Creates a new handler table entry and returns a {handler_id} identifying
+  // the entry, so that it can be referenced by the setter functions below.
+  int NewHandlerEntry();
+
+  // Setter functions that modify certain values within the handler table entry
+  // being referenced by the given {handler_id}. All values will be encoded by
+  // the resulting {HandlerTable} class when copied into the heap.
+  void SetTryRegionStart(int handler_id, size_t offset);
+  void SetTryRegionEnd(int handler_id, size_t offset);
+  void SetHandlerTarget(int handler_id, size_t offset);
+  void SetPrediction(int handler_id, bool will_catch);
+  void SetContextRegister(int handler_id, Register reg);
+
+ private:
+  struct Entry {
+    size_t offset_start;   // Bytecode offset starting try-region.
+    size_t offset_end;     // Bytecode offset ending try-region.
+    size_t offset_target;  // Bytecode offset of handler target.
+    Register context;      // Register holding context for handler.
+    bool will_catch;       // Optimistic prediction for handler.
+  };
+
+  Isolate* isolate_;
+  ZoneVector<Entry> entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
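
The interpreter-assembler code that follows reads 16-bit operands byte by
byte on targets without unaligned access and recombines them according to
endianness. A standalone sketch of the little-endian combining step:

  #include <cstdint>
  #include <cstdio>

  // Combine two byte loads into one 16-bit operand, as the fallback path in
  // BytecodeOperandShort() does for little-endian targets.
  uint16_t LoadU16LittleEndian(const uint8_t* p) {
    return static_cast<uint16_t>(p[0] | (p[1] << 8));
  }

  int main() {
    const uint8_t operand_bytes[] = {0x34, 0x12};
    printf("0x%04x\n", LoadU16LittleEndian(operand_bytes));  // Prints 0x1234.
    return 0;
  }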
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
new file mode 100644
index 0000000..440e879
--- /dev/null
+++ b/src/interpreter/interpreter-assembler.cc
@@ -0,0 +1,546 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+                                           Bytecode bytecode)
+    : compiler::CodeStubAssembler(
+          isolate, zone, InterpreterDispatchDescriptor(isolate),
+          Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+      bytecode_(bytecode),
+      accumulator_(this, MachineRepresentation::kTagged),
+      context_(this, MachineRepresentation::kTagged),
+      bytecode_array_(this, MachineRepresentation::kTagged),
+      disable_stack_check_across_call_(false),
+      stack_pointer_before_call_(nullptr) {
+  accumulator_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+  context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
+  bytecode_array_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
+  if (FLAG_trace_ignition) {
+    TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+  }
+}
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+
+void InterpreterAssembler::SetAccumulator(Node* value) {
+  accumulator_.Bind(value);
+}
+
+Node* InterpreterAssembler::GetContext() { return context_.value(); }
+
+void InterpreterAssembler::SetContext(Node* value) {
+  StoreRegister(value, Register::current_context());
+  context_.Bind(value);
+}
+
+Node* InterpreterAssembler::BytecodeOffset() {
+  return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
+}
+
+Node* InterpreterAssembler::RegisterFileRawPointer() {
+  return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
+}
+
+Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+  return bytecode_array_.value();
+}
+
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+  return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
+}
+
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+  return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::LoadRegister(int offset) {
+  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+              Int32Constant(offset));
+}
+
+Node* InterpreterAssembler::LoadRegister(Register reg) {
+  return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+  return WordShl(index, kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+  return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+              RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                             RegisterFileRawPointer(), Int32Constant(offset),
+                             value);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+  return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                             RegisterFileRawPointer(),
+                             RegisterFrameOffset(reg_index), value);
+}
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+  // Register indexes are negative, so the next index is minus one.
+  return IntPtrAdd(reg_index, Int32Constant(-1));
+}
+
+Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  return Load(
+      MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                      bytecode_, operand_index))));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kByte,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  Node* load = Load(
+      MachineType::Int8(), BytecodeArrayTaggedPointer(),
+      IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                      bytecode_, operand_index))));
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kShort,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  if (TargetSupportsUnalignedAccess()) {
+    return Load(
+        MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+        IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+                                        bytecode_, operand_index))));
+  } else {
+    int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+    Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                            IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+    Node* second_byte =
+        Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+             IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+    return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+    return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+  }
+}
+
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+    int operand_index) {
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+  DCHECK_EQ(OperandSize::kShort,
+            Bytecodes::GetOperandSize(bytecode_, operand_index));
+  int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+  Node* load;
+  if (TargetSupportsUnalignedAccess()) {
+    load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+                IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+  } else {
+#if V8_TARGET_LITTLE_ENDIAN
+    Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+    Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+    Node* hi_byte_offset = Int32Constant(operand_offset);
+    Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+    Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+                         IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+    Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+                         IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+    hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+    load = Word32Or(hi_byte, lo_byte);
+  }
+
+  // Ensure that we sign extend to full pointer size
+  if (kPointerSize == 8) {
+    load = ChangeInt32ToInt64(load);
+  }
+  return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+    case OperandSize::kByte:
+      DCHECK_EQ(OperandType::kRegCount8,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperand(operand_index);
+    case OperandSize::kShort:
+      DCHECK_EQ(OperandType::kRegCount16,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperandShort(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+  DCHECK_EQ(OperandType::kImm8,
+            Bytecodes::GetOperandType(bytecode_, operand_index));
+  return BytecodeOperandSignExtended(operand_index);
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+  switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+    case OperandSize::kByte:
+      DCHECK_EQ(OperandType::kIdx8,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperand(operand_index);
+    case OperandSize::kShort:
+      DCHECK_EQ(OperandType::kIdx16,
+                Bytecodes::GetOperandType(bytecode_, operand_index));
+      return BytecodeOperandShort(operand_index);
+    case OperandSize::kNone:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(bytecode_, operand_index);
+  if (Bytecodes::IsRegisterOperandType(operand_type)) {
+    OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+    if (operand_size == OperandSize::kByte) {
+      return BytecodeOperandSignExtended(operand_index);
+    } else if (operand_size == OperandSize::kShort) {
+      return BytecodeOperandShortSignExtended(operand_index);
+    }
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+                                        BytecodeArray::kConstantPoolOffset);
+  Node* entry_offset =
+      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+                WordShl(index, kPointerSizeLog2));
+  return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+}
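+// Sketch of the offset arithmetic above (assuming a 64-bit target, so
+// kPointerSizeLog2 == 3): entry |index| lives at
+// FixedArray::kHeaderSize - kHeapObjectTag + (index << 3) bytes past the
+// constant pool's tagged pointer; subtracting kHeapObjectTag undoes the tag.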
+
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+                                                  int index) {
+  Node* entry_offset =
+      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+                WordShl(Int32Constant(index), kPointerSizeLog2));
+  return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
+}
+
+Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
+  return Load(MachineType::AnyTagged(), object,
+              IntPtrConstant(offset - kHeapObjectTag));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
+  return Load(MachineType::AnyTagged(), context,
+              IntPtrConstant(Context::SlotOffset(slot_index)));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+  return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+                                             Node* value) {
+  Node* offset =
+      IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+                Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+  return Store(MachineRepresentation::kTagged, context, offset, value);
+}
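+// Both context-slot helpers compute the same untagged offset,
+// Context::kHeaderSize - kHeapObjectTag + slot_index * kPointerSize; the
+// store uses the tagged representation so that a write barrier can be
+// emitted for the stored value.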
+
+Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+  Node* function = Load(
+      MachineType::AnyTagged(), RegisterFileRawPointer(),
+      IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+  Node* shared_info =
+      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+  Node* vector =
+      LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+  return vector;
+}
+
+void InterpreterAssembler::CallPrologue() {
+  StoreRegister(SmiTag(BytecodeOffset()),
+                InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+  StoreRegister(BytecodeArrayTaggedPointer(),
+                InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
+
+  if (FLAG_debug_code && !disable_stack_check_across_call_) {
+    DCHECK(stack_pointer_before_call_ == nullptr);
+    stack_pointer_before_call_ = LoadStackPointer();
+  }
+}
+
+void InterpreterAssembler::CallEpilogue() {
+  if (FLAG_debug_code && !disable_stack_check_across_call_) {
+    Node* stack_pointer_after_call = LoadStackPointer();
+    Node* stack_pointer_before_call = stack_pointer_before_call_;
+    stack_pointer_before_call_ = nullptr;
+    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+                        kUnexpectedStackPointer);
+  }
+
+  // Restore bytecode array from stack frame in case the debugger has swapped us
+  // to the patched debugger bytecode array.
+  bytecode_array_.Bind(LoadRegister(
+      InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+}
+
+Node* InterpreterAssembler::CallJS(Node* function, Node* context,
+                                   Node* first_arg, Node* arg_count,
+                                   TailCallMode tail_call_mode) {
+  Callable callable =
+      CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+  Node* code_target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  first_arg, function);
+}
+
+Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
+                                          Node* new_target, Node* first_arg,
+                                          Node* arg_count) {
+  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+  Node* code_target = HeapConstant(callable.code());
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  new_target, constructor, first_arg);
+}
+
+Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
+                                         Node* first_arg, Node* arg_count,
+                                         int result_size) {
+  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+  Node* code_target = HeapConstant(callable.code());
+
+  // Get the function entry from the function id.
+  Node* function_table = ExternalConstant(
+      ExternalReference::runtime_function_table_address(isolate()));
+  Node* function_offset =
+      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
+  Node* function = IntPtrAdd(function_table, function_offset);
+  Node* function_entry =
+      Load(MachineType::Pointer(), function,
+           Int32Constant(offsetof(Runtime::Function, entry)));
+
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  first_arg, function_entry, result_size);
+}
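+// Sketch of the table lookup above (illustrative id): for function id N the
+// entry address is function_table + N * sizeof(Runtime::Function), and the C
+// entry point is read at offsetof(Runtime::Function, entry) within it.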
+
+void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
+  CodeStubAssembler::Label ok(this);
+  CodeStubAssembler::Label interrupt_check(this);
+  CodeStubAssembler::Label end(this);
+  Node* budget_offset =
+      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
+
+  // Update budget by |weight| and check if it reaches zero.
+  Node* old_budget =
+      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
+  Node* new_budget = Int32Add(old_budget, weight);
+  Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+  Branch(condition, &ok, &interrupt_check);
+
+  // Perform interrupt and reset budget.
+  Bind(&interrupt_check);
+  CallRuntime(Runtime::kInterrupt, GetContext());
+  StoreNoWriteBarrier(MachineRepresentation::kWord32,
+                      BytecodeArrayTaggedPointer(), budget_offset,
+                      Int32Constant(Interpreter::InterruptBudget()));
+  Goto(&end);
+
+  // Update budget.
+  Bind(&ok);
+  StoreNoWriteBarrier(MachineRepresentation::kWord32,
+                      BytecodeArrayTaggedPointer(), budget_offset, new_budget);
+  Goto(&end);
+  Bind(&end);
+}
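+// Example of the budget arithmetic (assumed numbers): a budget of 10 plus a
+// backward-jump weight of -25 gives -15 < 0, so the interrupt_check path
+// calls Runtime::kInterrupt and resets the budget to
+// Interpreter::InterruptBudget().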
+
+Node* InterpreterAssembler::Advance(int delta) {
+  return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+Node* InterpreterAssembler::Advance(Node* delta) {
+  return IntPtrAdd(BytecodeOffset(), delta);
+}
+
+void InterpreterAssembler::Jump(Node* delta) {
+  UpdateInterruptBudget(delta);
+  DispatchTo(Advance(delta));
+}
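+// Note: |delta| doubles as the interrupt weight, so backward jumps (negative
+// delta) consume interrupt budget while forward jumps credit it back.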
+
+void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
+  CodeStubAssembler::Label match(this);
+  CodeStubAssembler::Label no_match(this);
+
+  Branch(condition, &match, &no_match);
+  Bind(&match);
+  Jump(delta);
+  Bind(&no_match);
+  Dispatch();
+}
+
+void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
+  JumpConditional(WordEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
+                                              Node* delta) {
+  JumpConditional(WordNotEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::Dispatch() {
+  DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+}
+
+void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+  Node* target_bytecode = Load(
+      MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+
+  // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+  // from code object on every dispatch.
+  Node* target_code_object =
+      Load(MachineType::Pointer(), DispatchTableRawPointer(),
+           Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+
+  DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+}
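+// Illustrative dispatch (assumed byte value): if the next bytecode byte is
+// 0x05, its handler is loaded from
+// dispatch_table + (0x05 << kPointerSizeLog2), i.e. the sixth pointer-sized
+// slot of the table.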
+
+void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+                                                     Node* bytecode_offset) {
+  if (FLAG_trace_ignition) {
+    TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+  }
+
+  InterpreterDispatchDescriptor descriptor(isolate());
+  Node* args[] = {GetAccumulator(),          RegisterFileRawPointer(),
+                  bytecode_offset,           BytecodeArrayTaggedPointer(),
+                  DispatchTableRawPointer(), GetContext()};
+  TailCall(descriptor, handler, args, 0);
+}
+
+void InterpreterAssembler::InterpreterReturn() {
+  // TODO(rmcilroy): Investigate whether it is worth supporting self
+  // optimization of primitive functions the way FullCodegen does.
+
+  // Update the profiling count by -BytecodeOffset to simulate a backedge to
+  // the start of the function.
+  Node* profiling_weight =
+      Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
+               BytecodeOffset());
+  UpdateInterruptBudget(profiling_weight);
+
+  Node* exit_trampoline_code_object =
+      HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
+  DispatchToBytecodeHandler(exit_trampoline_code_object);
+}
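+// The weight above works out to roughly the negative of the number of
+// bytecode bytes executed, since BytecodeOffset() starts just past the
+// BytecodeArray header; returning therefore charges the budget as if a
+// backedge spanned the whole function.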
+
+void InterpreterAssembler::StackCheck() {
+  CodeStubAssembler::Label end(this);
+  CodeStubAssembler::Label ok(this);
+  CodeStubAssembler::Label stack_guard(this);
+
+  Node* sp = LoadStackPointer();
+  Node* stack_limit = Load(
+      MachineType::Pointer(),
+      ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+  Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
+  Branch(condition, &ok, &stack_guard);
+  Bind(&stack_guard);
+  CallRuntime(Runtime::kStackGuard, GetContext());
+  Goto(&end);
+  Bind(&ok);
+  Goto(&end);
+  Bind(&end);
+}
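+// sp >= stack_limit is the fast path here (the stack grows downwards); an
+// overflowing frame calls Runtime::kStackGuard, which also gives the runtime
+// a chance to handle pending interrupts.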
+
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+  disable_stack_check_across_call_ = true;
+  Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+  Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+  disable_stack_check_across_call_ = false;
+  // Unreached, but keeps TurboFan happy.
+  Return(ret_value);
+}
+
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+                                               BailoutReason bailout_reason) {
+  CodeStubAssembler::Label match(this);
+  CodeStubAssembler::Label no_match(this);
+
+  Node* condition = WordEqual(lhs, rhs);
+  Branch(condition, &match, &no_match);
+  Bind(&no_match);
+  Abort(bailout_reason);
+  Bind(&match);
+}
+
+void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
+  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
+              SmiTag(BytecodeOffset()), GetAccumulator());
+}
+
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+  return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+  return true;
+#else
+#error "Unknown Architecture"
+#endif
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
new file mode 100644
index 0000000..9600dfb
--- /dev/null
+++ b/src/interpreter/interpreter-assembler.h
@@ -0,0 +1,205 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/compiler/code-stub-assembler.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class InterpreterAssembler : public compiler::CodeStubAssembler {
+ public:
+  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+  virtual ~InterpreterAssembler();
+
+  // Returns the count immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandCount(int operand_index);
+  // Returns the index immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandIdx(int operand_index);
+  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandImm(int operand_index);
+  // Returns the register index for bytecode operand |operand_index| in the
+  // current bytecode.
+  compiler::Node* BytecodeOperandReg(int operand_index);
+
+  // Accumulator.
+  compiler::Node* GetAccumulator();
+  void SetAccumulator(compiler::Node* value);
+
+  // Context.
+  compiler::Node* GetContext();
+  void SetContext(compiler::Node* value);
+
+  // Loads from and stores to the interpreter register file.
+  compiler::Node* LoadRegister(int offset);
+  compiler::Node* LoadRegister(Register reg);
+  compiler::Node* LoadRegister(compiler::Node* reg_index);
+  compiler::Node* StoreRegister(compiler::Node* value, int offset);
+  compiler::Node* StoreRegister(compiler::Node* value, Register reg);
+  compiler::Node* StoreRegister(compiler::Node* value,
+                                compiler::Node* reg_index);
+
+  // Returns the next consecutive register.
+  compiler::Node* NextRegister(compiler::Node* reg_index);
+
+  // Returns the location in memory of the register |reg_index| in the
+  // interpreter register file.
+  compiler::Node* RegisterLocation(compiler::Node* reg_index);
+
+  // Load constant at |index| in the constant pool.
+  compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+
+  // Load an element from a fixed array on the heap.
+  compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
+
+  // Load a field from an object on the heap.
+  compiler::Node* LoadObjectField(compiler::Node* object, int offset);
+
+  // Load |slot_index| from |context|.
+  compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
+  compiler::Node* LoadContextSlot(compiler::Node* context,
+                                  compiler::Node* slot_index);
+  // Stores |value| into |slot_index| of |context|.
+  compiler::Node* StoreContextSlot(compiler::Node* context,
+                                   compiler::Node* slot_index,
+                                   compiler::Node* value);
+
+  // Load the TypeFeedbackVector for the current function.
+  compiler::Node* LoadTypeFeedbackVector();
+
+  // Call JSFunction or Callable |function| with |arg_count|
+  // arguments (not including receiver) and the first argument
+  // located at |first_arg|.
+  compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
+                         compiler::Node* first_arg, compiler::Node* arg_count,
+                         TailCallMode tail_call_mode);
+
+  // Call constructor |constructor| with |arg_count| arguments (not
+  // including receiver) and the first argument located at
+  // |first_arg|. The |new_target| is the same as the
+  // |constructor| for the new keyword, but differs for the super
+  // keyword.
+  compiler::Node* CallConstruct(compiler::Node* constructor,
+                                compiler::Node* context,
+                                compiler::Node* new_target,
+                                compiler::Node* first_arg,
+                                compiler::Node* arg_count);
+
+  // Call runtime function with |arg_count| arguments and the first argument
+  // located at |first_arg|.
+  compiler::Node* CallRuntimeN(compiler::Node* function_id,
+                               compiler::Node* context,
+                               compiler::Node* first_arg,
+                               compiler::Node* arg_count, int result_size = 1);
+
+  // Jump relative to the current bytecode by |jump_offset|.
+  void Jump(compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // |condition| is true. Helper function for JumpIfWordEqual and
+  // JumpIfWordNotEqual.
+  void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // word values |lhs| and |rhs| are equal.
+  void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
+                       compiler::Node* jump_offset);
+
+  // Jump relative to the current bytecode by |jump_offset| if the
+  // word values |lhs| and |rhs| are not equal.
+  void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+                          compiler::Node* jump_offset);
+
+  // Perform a stack guard check.
+  void StackCheck();
+
+  // Returns from the function.
+  void InterpreterReturn();
+
+  // Dispatch to the next bytecode in the bytecode array.
+  void Dispatch();
+
+  // Dispatch to the given bytecode handler.
+  void DispatchToBytecodeHandler(compiler::Node* handler,
+                                 compiler::Node* bytecode_offset);
+  void DispatchToBytecodeHandler(compiler::Node* handler) {
+    DispatchToBytecodeHandler(handler, BytecodeOffset());
+  }
+
+  // Abort with the given bailout reason.
+  void Abort(BailoutReason bailout_reason);
+
+ protected:
+  static bool TargetSupportsUnalignedAccess();
+
+ private:
+  // Returns a raw pointer to start of the register file on the stack.
+  compiler::Node* RegisterFileRawPointer();
+  // Returns a tagged pointer to the current function's BytecodeArray object.
+  compiler::Node* BytecodeArrayTaggedPointer();
+  // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+  compiler::Node* BytecodeOffset();
+  // Returns a raw pointer to first entry in the interpreter dispatch table.
+  compiler::Node* DispatchTableRawPointer();
+
+  // Saves and restores the interpreter bytecode offset in the interpreter
+  // stack frame when performing a call.
+  void CallPrologue() override;
+  void CallEpilogue() override;
+
+  // Traces the current bytecode by calling |function_id|.
+  void TraceBytecode(Runtime::FunctionId function_id);
+
+  // Updates the bytecode array's interrupt budget by |weight| and calls
+  // Runtime::kInterrupt if the budget drops below zero.
+  void UpdateInterruptBudget(compiler::Node* weight);
+
+  // Returns the offset of register |index| relative to
+  // RegisterFileRawPointer().
+  compiler::Node* RegisterFrameOffset(compiler::Node* index);
+
+  compiler::Node* BytecodeOperand(int operand_index);
+  compiler::Node* BytecodeOperandSignExtended(int operand_index);
+  compiler::Node* BytecodeOperandShort(int operand_index);
+  compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+
+  // Returns BytecodeOffset() advanced by |delta| bytes. Note: this does not
+  // update BytecodeOffset() itself.
+  compiler::Node* Advance(int delta);
+  compiler::Node* Advance(compiler::Node* delta);
+
+  // Starts next instruction dispatch at |new_bytecode_offset|.
+  void DispatchTo(compiler::Node* new_bytecode_offset);
+
+  // Abort operations for debug code.
+  void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+                           BailoutReason bailout_reason);
+
+  Bytecode bytecode_;
+  CodeStubAssembler::Variable accumulator_;
+  CodeStubAssembler::Variable context_;
+  CodeStubAssembler::Variable bytecode_array_;
+
+  bool disable_stack_check_across_call_;
+  compiler::Node* stack_pointer_before_call_;
+
+  DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 574602b..eb88342 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -4,12 +4,13 @@
 
 #include "src/interpreter/interpreter.h"
 
+#include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
 #include "src/compiler.h"
-#include "src/compiler/interpreter-assembler.h"
 #include "src/factory.h"
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
 #include "src/zone.h"
 
 namespace v8 {
@@ -20,52 +21,77 @@
 
 #define __ assembler->
 
-
-Interpreter::Interpreter(Isolate* isolate)
-    : isolate_(isolate) {}
-
-
-// static
-Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
-    Isolate* isolate) {
-  Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
-      static_cast<int>(Bytecode::kLast) + 1, TENURED);
-  // We rely on the interpreter handler table being immovable, so check that
-  // it was allocated on the first page (which is always immovable).
-  DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
-      handler_table->address()));
-  return handler_table;
+Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+  memset(&dispatch_table_, 0, sizeof(dispatch_table_));
 }
 
-
 void Interpreter::Initialize() {
   DCHECK(FLAG_ignition);
-  Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
-  if (!IsInterpreterTableInitialized(handler_table)) {
-    Zone zone;
-    HandleScope scope(isolate_);
+  if (IsDispatchTableInitialized()) return;
+  Zone zone;
+  HandleScope scope(isolate_);
 
-#define GENERATE_CODE(Name, ...)                                      \
-    {                                                                 \
-      compiler::InterpreterAssembler assembler(isolate_, &zone,       \
-                                               Bytecode::k##Name);    \
-      Do##Name(&assembler);                                           \
-      Handle<Code> code = assembler.GenerateCode();                   \
-      handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
-    }
-    BYTECODE_LIST(GENERATE_CODE)
-#undef GENERATE_CODE
+#define GENERATE_CODE(Name, ...)                                        \
+  {                                                                     \
+    InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
+    Do##Name(&assembler);                                               \
+    Handle<Code> code = assembler.GenerateCode();                       \
+    TraceCodegen(code, #Name);                                          \
+    dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code;      \
   }
+  BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
 }
 
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+  DCHECK(IsDispatchTableInitialized());
+  return dispatch_table_[Bytecodes::ToByte(bytecode)];
+}
+
+void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+  v->VisitPointers(
+      reinterpret_cast<Object**>(&dispatch_table_[0]),
+      reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+}
+
+// static
+int Interpreter::InterruptBudget() {
+  // TODO(ignition): Tune code size multiplier.
+  const int kCodeSizeMultiplier = 32;
+  return FLAG_interrupt_budget * kCodeSizeMultiplier;
+}
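+// E.g. (assumed flag value): running with --interrupt_budget=144 would give
+// bytecode handlers a budget of 144 * 32 == 4608 before Runtime::kInterrupt
+// fires.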
 
 bool Interpreter::MakeBytecode(CompilationInfo* info) {
+  if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+    OFStream os(stdout);
+    base::SmartArrayPointer<char> name = info->GetDebugName();
+    os << "[generating bytecode for function: " << name.get()
+       << "]" << std::endl
+       << std::flush;
+  }
+
+#ifdef DEBUG
+  if (info->parse_info() && FLAG_print_source) {
+    OFStream os(stdout);
+    os << "--- Source from AST ---" << std::endl
+       << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
+       << std::endl
+       << std::flush;
+  }
+
+  if (info->parse_info() && FLAG_print_ast) {
+    OFStream os(stdout);
+    os << "--- AST ---" << std::endl
+       << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
+       << std::flush;
+  }
+#endif  // DEBUG
+
   BytecodeGenerator generator(info->isolate(), info->zone());
   info->EnsureFeedbackVector();
   Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
   if (FLAG_print_bytecode) {
     OFStream os(stdout);
-    os << "Function: " << info->GetDebugName().get() << std::endl;
     bytecodes->Print(os);
     os << std::flush;
   }
@@ -75,18 +101,28 @@
   return true;
 }
 
-
-bool Interpreter::IsInterpreterTableInitialized(
-    Handle<FixedArray> handler_table) {
-  DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
-  return handler_table->get(0) != isolate_->heap()->undefined_value();
+bool Interpreter::IsDispatchTableInitialized() {
+  if (FLAG_trace_ignition) {
+    // Regenerate table to add bytecode tracing operations.
+    return false;
+  }
+  return dispatch_table_[0] != nullptr;
 }
 
+void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_trace_ignition_codegen) {
+    OFStream os(stdout);
+    code->Disassemble(name, os);
+    os << std::flush;
+  }
+#endif  // ENABLE_DISASSEMBLER
+}
 
 // LdaZero
 //
 // Load literal '0' into the accumulator.
-void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
   Node* zero_value = __ NumberConstant(0.0);
   __ SetAccumulator(zero_value);
   __ Dispatch();
@@ -96,15 +132,14 @@
 // LdaSmi8 <imm8>
 //
 // Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
   Node* raw_int = __ BytecodeOperandImm(0);
   Node* smi_int = __ SmiTag(raw_int);
   __ SetAccumulator(smi_int);
   __ Dispatch();
 }
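+// A rough sketch of the tagging step above (assuming the usual Smi scheme):
+// the raw 8-bit operand is shifted into the Smi payload bits, so small
+// integer literals never allocate on the heap.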
 
-
-void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   __ SetAccumulator(constant);
@@ -115,7 +150,7 @@
 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -123,7 +158,7 @@
 // LdaConstantWide <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
   DoLoadConstant(assembler);
 }
 
@@ -131,7 +166,7 @@
 // LdaUndefined
 //
 // Load Undefined into the accumulator.
-void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
   __ SetAccumulator(undefined_value);
@@ -142,7 +177,7 @@
 // LdaNull
 //
 // Load Null into the accumulator.
-void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   __ SetAccumulator(null_value);
   __ Dispatch();
@@ -152,7 +187,7 @@
 // LdaTheHole
 //
 // Load TheHole into the accumulator.
-void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
   __ SetAccumulator(the_hole_value);
   __ Dispatch();
@@ -162,7 +197,7 @@
 // LdaTrue
 //
 // Load True into the accumulator.
-void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
   Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
   __ SetAccumulator(true_value);
   __ Dispatch();
@@ -172,7 +207,7 @@
 // LdaFalse
 //
 // Load False into the accumulator.
-void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
   Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
   __ SetAccumulator(false_value);
   __ Dispatch();
@@ -182,7 +217,7 @@
 // Ldar <src>
 //
 // Load accumulator with value from register <src>.
-void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* value = __ LoadRegister(reg_index);
   __ SetAccumulator(value);
@@ -193,7 +228,7 @@
 // Star <dst>
 //
 // Store accumulator to register <dst>.
-void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStar(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* accumulator = __ GetAccumulator();
   __ StoreRegister(accumulator, reg_index);
@@ -201,32 +236,10 @@
 }
 
 
-// Exchange <reg8> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
-  Node* reg0_index = __ BytecodeOperandReg(0);
-  Node* reg1_index = __ BytecodeOperandReg(1);
-  Node* reg0_value = __ LoadRegister(reg0_index);
-  Node* reg1_value = __ LoadRegister(reg1_index);
-  __ StoreRegister(reg1_value, reg0_index);
-  __ StoreRegister(reg0_value, reg1_index);
-  __ Dispatch();
-}
-
-
-// ExchangeWide <reg16> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
-  return DoExchange(assembler);
-}
-
-
 // Mov <src> <dst>
 //
 // Stores the value of register <src> to register <dst>.
-void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMov(InterpreterAssembler* assembler) {
   Node* src_index = __ BytecodeOperandReg(0);
   Node* src_value = __ LoadRegister(src_index);
   Node* dst_index = __ BytecodeOperandReg(1);
@@ -235,8 +248,14 @@
 }
 
 
-void Interpreter::DoLoadGlobal(Callable ic,
-                               compiler::InterpreterAssembler* assembler) {
+// MovWide <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
+  DoMov(assembler);
+}
+
+void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -250,109 +269,54 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
-                           type_feedback_vector);
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobal <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
+  DoLoadGlobal(ic, assembler);
+}
+
+// LdaGlobalWide <name_index> <slot>
+//
+// Load the global with name in constant pool entry <name_index> into the
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
-
-// LdaGlobalInsideTypeofSloppy <name_index> <slot>
+// LdaGlobalInsideTypeofWide <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
   DoLoadGlobal(ic, assembler);
 }
 
 
-// LdaGlobalInsideTypeofStrict <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadGlobal(ic, assembler);
-}
-
-
-void Interpreter::DoStoreGlobal(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context =
@@ -367,8 +331,8 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
-            type_feedback_vector);
+  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+              smi_slot, type_feedback_vector);
 
   __ Dispatch();
 }
@@ -378,7 +342,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -389,7 +353,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -400,8 +364,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -412,8 +375,7 @@
 //
 // Store the value in the accumulator into the global with name in constant pool
 // entry <name_index> using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreGlobal(ic, assembler);
@@ -423,7 +385,7 @@
 // LdaContextSlot <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   Node* slot_index = __ BytecodeOperandIdx(1);
@@ -436,8 +398,7 @@
 // LdaContextSlotWide <context> <slot_index>
 //
 // Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
   DoLdaContextSlot(assembler);
 }
 
@@ -445,7 +406,7 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
@@ -458,19 +419,16 @@
 // StaContextSlot <context> <slot_index>
 //
 // Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
   DoStaContextSlot(assembler);
 }
 
-
 void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
-                                   compiler::InterpreterAssembler* assembler) {
+                                   InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* result_pair = __ CallRuntime(function_id, context, name);
-  Node* result = __ Projection(0, result_pair);
+  Node* result = __ CallRuntime(function_id, context, name);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -480,7 +438,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
   DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
 }
 
@@ -489,9 +447,8 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeof(
-    compiler::InterpreterAssembler* assembler) {
-  DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
+  DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
 }
 
 
@@ -499,8 +456,7 @@
 //
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically.
-void Interpreter::DoLdaLookupSlotWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
   DoLdaLookupSlot(assembler);
 }
 
@@ -510,20 +466,20 @@
 // Lookup the object with the name in constant pool entry |name_index|
 // dynamically without causing a NoReferenceError.
 void Interpreter::DoLdaLookupSlotInsideTypeofWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoLdaLookupSlotInsideTypeof(assembler);
 }
 
-
 void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
-                                    compiler::InterpreterAssembler* assembler) {
+                                    InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* name = __ LoadConstantPoolEntry(index);
   Node* context = __ GetContext();
-  Node* language_mode_node = __ NumberConstant(language_mode);
-  Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
-                                language_mode_node);
+  Node* result = __ CallRuntime(is_strict(language_mode)
+                                    ? Runtime::kStoreLookupSlot_Strict
+                                    : Runtime::kStoreLookupSlot_Sloppy,
+                                context, name, value);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -533,8 +489,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
 }
 
@@ -543,8 +498,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
   DoStoreLookupSlot(LanguageMode::STRICT, assembler);
 }
 
@@ -553,8 +507,7 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotSloppy(assembler);
 }
 
@@ -563,14 +516,11 @@
 //
 // Store the object in accumulator to the object with the name in constant
 // pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
   DoStaLookupSlotStrict(assembler);
 }
 
-
-void Interpreter::DoLoadIC(Callable ic,
-                           compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(register_index);
@@ -579,61 +529,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// LoadICSloppy <object> <name_index> <slot>
+// LoadIC <object> <name_index> <slot>
 //
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
+                                                   UNINITIALIZED);
+  DoLoadIC(ic, assembler);
+}
+
+// LoadICWide <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+                                                   UNINITIALIZED);
   DoLoadIC(ic, assembler);
 }
 
 
-// LoadICStrict <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   SLOPPY, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-// LoadICStrictWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
-                                                   STRICT, UNINITIALIZED);
-  DoLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoKeyedLoadIC(Callable ic,
-                                compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
@@ -641,63 +565,35 @@
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
-                           type_feedback_vector);
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, type_feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
-// KeyedLoadICSloppy <object> <slot>
+// KeyedLoadIC <object> <slot>
 //
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
   Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
+  DoKeyedLoadIC(ic, assembler);
+}
+
+// KeyedLoadICWide <object> <slot>
+//
+// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
+  Callable ic =
+      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
   DoKeyedLoadIC(ic, assembler);
 }
 
 
-// KeyedLoadICStrict <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrict(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICSloppyWide <object> <slot>
-//
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICStrictWide <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
-  DoKeyedLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoStoreIC(Callable ic,
-                            compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -707,8 +603,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -718,7 +615,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -730,7 +627,7 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -742,8 +639,7 @@
 // Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoStoreIC(ic, assembler);
@@ -755,16 +651,13 @@
 // Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
 // the name in constant pool entry <name_index> with the value in the
 // accumulator.
-void Interpreter::DoStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoStoreIC(ic, assembler);
 }
 
-
-void Interpreter::DoKeyedStoreIC(Callable ic,
-                                 compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -774,8 +667,9 @@
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
-            type_feedback_vector);
+  Node* context = __ GetContext();
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, type_feedback_vector);
   __ Dispatch();
 }
 
@@ -784,8 +678,7 @@
 //
 // Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -796,8 +689,7 @@
 //
 // Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -808,8 +700,7 @@
 //
 // Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
@@ -820,22 +711,22 @@
 //
 // Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
 // and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
   Callable ic =
       CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
   DoKeyedStoreIC(ic, assembler);
 }
 
-
 // PushContext <context>
 //
-// Pushes the accumulator as the current context, and saves it in <context>
-void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+// Saves the current context in <context>, and pushes the accumulator as the
+// new current context.
+void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
-  Node* context = __ GetAccumulator();
-  __ SetContext(context);
-  __ StoreRegister(context, reg_index);
+  Node* new_context = __ GetAccumulator();
+  Node* old_context = __ GetContext();
+  __ StoreRegister(old_context, reg_index);
+  __ SetContext(new_context);
   __ Dispatch();
 }
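
The reordered DoPushContext above is the behavioural fix in this hunk: the outgoing context must be saved into the operand register before the accumulator is installed as the new context, otherwise PopContext has nothing valid to restore. A minimal standalone sketch of that protocol (plain C++ with hypothetical Machine/Context types, not V8 internals):

#include <cassert>

struct Context { int id; };

struct Machine {
  Context* current = nullptr;
  Context* regs[8] = {};

  // PushContext <reg>: save the current context in <reg>, then make the
  // accumulator's context current -- the ordering fixed by this patch.
  void PushContext(int reg, Context* acc) {
    regs[reg] = current;  // save the old context first
    current = acc;        // only then switch
  }

  // PopContext <reg>: restore the context previously saved in <reg>.
  void PopContext(int reg) { current = regs[reg]; }
};

int main() {
  Context outer{0}, inner{1};
  Machine m;
  m.current = &outer;
  m.PushContext(3, &inner);
  assert(m.current == &inner && m.regs[3] == &outer);
  m.PopContext(3);
  assert(m.current == &outer);
}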
 
@@ -843,22 +734,22 @@
 // PopContext <context>
 //
 // Pops the current context and sets <context> as the new context.
-void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* context = __ LoadRegister(reg_index);
   __ SetContext(context);
   __ Dispatch();
 }
 
-
 void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
-                             compiler::InterpreterAssembler* assembler) {
+                             InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
   // operations, instead of calling builtins directly.
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* lhs = __ LoadRegister(reg_index);
   Node* rhs = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, lhs, rhs);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, lhs, rhs);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -867,7 +758,7 @@
 // Add <src>
 //
 // Add register <src> to accumulator.
-void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoAdd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kAdd, assembler);
 }
 
@@ -875,7 +766,7 @@
 // Sub <src>
 //
 // Subtract accumulator from register <src>.
-void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoSub(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kSubtract, assembler);
 }
 
@@ -883,7 +774,7 @@
 // Mul <src>
 //
 // Multiply accumulator by register <src>.
-void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMul(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kMultiply, assembler);
 }
 
@@ -891,7 +782,7 @@
 // Div <src>
 //
 // Divide register <src> by accumulator.
-void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDiv(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kDivide, assembler);
 }
 
@@ -899,7 +790,7 @@
 // Mod <src>
 //
 // Modulo register <src> by accumulator.
-void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMod(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kModulus, assembler);
 }
 
@@ -907,7 +798,7 @@
 // BitwiseOr <src>
 //
 // BitwiseOr register <src> to accumulator.
-void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseOr, assembler);
 }
 
@@ -915,7 +806,7 @@
 // BitwiseXor <src>
 //
 // BitwiseXor register <src> to accumulator.
-void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseXor, assembler);
 }
 
@@ -923,7 +814,7 @@
 // BitwiseAnd <src>
 //
 // BitwiseAnd register <src> to accumulator.
-void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kBitwiseAnd, assembler);
 }
 
@@ -934,7 +825,7 @@
 // Register <src> is converted to an int32 and the accumulator to uint32
 // before the operation. The 5 least-significant bits of the accumulator are
 // used as the count, i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftLeft, assembler);
 }
 
@@ -945,7 +836,7 @@
 // Result is sign-extended. Register <src> is converted to an int32 and the
 // accumulator to a uint32 before the operation. The 5 least-significant bits
 // of the accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
-void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRight, assembler);
 }
 
@@ -956,17 +847,16 @@
 // Result is zero-filled. The accumulator and register <src> are converted to
 // uint32 before the operation. The 5 least-significant bits of the accumulator
 // are used as the count, i.e. <src> >>> (accumulator & 0x1F).
-void Interpreter::DoShiftRightLogical(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kShiftRightLogical, assembler);
 }
 
-
 void Interpreter::DoCountOp(Runtime::FunctionId function_id,
-                            compiler::InterpreterAssembler* assembler) {
+                            InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
   Node* one = __ NumberConstant(1);
-  Node* result = __ CallRuntime(function_id, value, one);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, value, one);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -975,7 +865,7 @@
 // Inc
 //
 // Increments value in the accumulator by one.
-void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoInc(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kAdd, assembler);
 }
 
@@ -983,7 +873,7 @@
 // Dec
 //
 // Decrements value in the accumulator by one.
-void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDec(InterpreterAssembler* assembler) {
   DoCountOp(Runtime::kSubtract, assembler);
 }
 
@@ -992,9 +882,11 @@
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1004,20 +896,22 @@
 //
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
-void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
-
 void Interpreter::DoDelete(Runtime::FunctionId function_id,
-                           compiler::InterpreterAssembler* assembler) {
+                           InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(reg_index);
   Node* key = __ GetAccumulator();
-  Node* result = __ CallRuntime(function_id, object, key);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, object, key);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1027,8 +921,7 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following strict mode semantics.
-void Interpreter::DoDeletePropertyStrict(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Strict, assembler);
 }
 
@@ -1037,34 +930,23 @@
 //
 // Delete the property specified in the accumulator from the object
 // referenced by the register operand following sloppy mode semantics.
-void Interpreter::DoDeletePropertySloppy(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
 }
 
-
-// DeleteLookupSlot
-//
-// Delete the variable with the name specified in the accumulator by dynamically
-// looking it up.
-void Interpreter::DoDeleteLookupSlot(
-    compiler::InterpreterAssembler* assembler) {
-  Node* name = __ GetAccumulator();
-  Node* context = __ GetContext();
-  Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
-  __ SetAccumulator(result);
-  __ Dispatch();
-}
-
-
-void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJSCall(InterpreterAssembler* assembler,
+                           TailCallMode tail_call_mode) {
   Node* function_reg = __ BytecodeOperandReg(0);
   Node* function = __ LoadRegister(function_reg);
   Node* receiver_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+  Node* context = __ GetContext();
+  // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
+  Node* result =
+      __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
   __ SetAccumulator(result);
   __ Dispatch();
 }
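
The Int32Sub in DoJSCall (and DoCallJSRuntimeCommon below) reflects a calling-convention detail: the bytecode's count operand covers the receiver plus the arguments, while the call builtin expects the argument count alone. A small sketch of that bookkeeping (the helper name is hypothetical, not a V8 API):

#include <cassert>
#include <cstdint>

// The operand counts receiver + args; the builtin wants args only.
int32_t ArgsCountForCall(int32_t receiver_args_count) {
  const int32_t kReceiverCount = 1;
  return receiver_args_count - kReceiverCount;
}

int main() {
  // Call f(receiver, a, b): the count operand is 3, the builtin sees 2.
  assert(ArgsCountForCall(3) == 2);
}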
@@ -1074,8 +956,8 @@
 //
 // Call a JSFunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
 }
 
 
@@ -1083,8 +965,35 @@
 //
 // Call a JSFunction or Callable in |callable| with the |receiver| and
 // |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
-  DoJSCall(assembler);
+void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
+// TailCall <callable> <receiver> <arg_count>
+//
+// Tail call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+// TailCallWide <callable> <receiver> <arg_count>
+//
+// Tail call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
+  DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
+  Node* function_id = __ BytecodeOperandIdx(0);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
 }
 
 
@@ -1093,13 +1002,37 @@
 // Call the runtime function |function_id| with the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
 // registers.
-void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+
+// CallRuntime <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
+  // Call the runtime function.
   Node* function_id = __ BytecodeOperandIdx(0);
   Node* first_arg_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
-  Node* result = __ CallRuntime(function_id, first_arg, args_count);
-  __ SetAccumulator(result);
+  Node* context = __ GetContext();
+  Node* result_pair =
+      __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
+
+  // Store the results in <first_return> and <first_return + 1>
+  Node* first_return_reg = __ BytecodeOperandReg(3);
+  Node* second_return_reg = __ NextRegister(first_return_reg);
+  Node* result0 = __ Projection(0, result_pair);
+  Node* result1 = __ Projection(1, result_pair);
+  __ StoreRegister(result0, first_return_reg);
+  __ StoreRegister(result1, second_return_reg);
   __ Dispatch();
 }
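
DoCallRuntimeForPairCommon relies on a simple register protocol: the two projections of the returned pair land in <first_return> and the register immediately after it. A sketch of the same idea with std::pair (illustrative names only, not the V8 node machinery):

#include <cassert>
#include <utility>

// Stand-in for a runtime function that returns two values.
std::pair<int, int> DivMod(int a, int b) { return {a / b, a % b}; }

int main() {
  int regs[8] = {};
  int first_return = 2;
  auto result_pair = DivMod(7, 3);
  regs[first_return] = result_pair.first;       // Projection 0
  regs[first_return + 1] = result_pair.second;  // Projection 1 -> NextRegister
  assert(regs[2] == 2 && regs[3] == 1);
}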
 
@@ -1110,36 +1043,28 @@
 // first argument in register |first_arg| and |arg_count| arguments in
 // subsequent registers. Returns the result in <first_return> and
 // <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(
-    compiler::InterpreterAssembler* assembler) {
-  // Call the runtime function.
-  Node* function_id = __ BytecodeOperandIdx(0);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
-
-  // Store the results in <first_return> and <first_return + 1>
-  Node* first_return_reg = __ BytecodeOperandReg(3);
-  Node* second_return_reg = __ NextRegister(first_return_reg);
-  Node* result0 = __ Projection(0, result_pair);
-  Node* result1 = __ Projection(1, result_pair);
-  __ StoreRegister(result0, first_return_reg);
-  __ StoreRegister(result1, second_return_reg);
-
-  __ Dispatch();
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
 }
 
 
-// CallJSRuntime <context_index> <receiver> <arg_count>
+// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
 //
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
+  DoCallRuntimeForPairCommon(assembler);
+}
+
+void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
   Node* context_index = __ BytecodeOperandIdx(0);
   Node* receiver_reg = __ BytecodeOperandReg(1);
   Node* first_arg = __ RegisterLocation(receiver_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
 
   // Get the function to call from the native context.
   Node* context = __ GetContext();
@@ -1148,7 +1073,41 @@
   Node* function = __ LoadContextSlot(native_context, context_index);
 
   // Call the function.
-  Node* result = __ CallJS(function, first_arg, args_count);
+  Node* result = __ CallJS(function, context, first_arg, args_count,
+                           TailCallMode::kDisallow);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+
+// CallJSRuntime <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+
+// CallJSRuntimeWide <context_index> <receiver> <arg_count>
+//
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
+  DoCallJSRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+  Node* new_target = __ GetAccumulator();
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallConstruct(constructor, context, new_target, first_arg, args_count);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1158,42 +1117,45 @@
 //
 // Call operator new with |constructor| and the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
 //
-void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
-  Node* constructor_reg = __ BytecodeOperandReg(0);
-  Node* constructor = __ LoadRegister(constructor_reg);
-  Node* first_arg_reg = __ BytecodeOperandReg(1);
-  Node* first_arg = __ RegisterLocation(first_arg_reg);
-  Node* args_count = __ BytecodeOperandCount(2);
-  Node* result =
-      __ CallConstruct(constructor, constructor, first_arg, args_count);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoNew(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
+}
+
+
+// NewWide <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
+  DoCallConstruct(assembler);
 }
 
 
 // TestEqual <src>
 //
 // Test if the value in the <src> register equals the accumulator.
-void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterEquals, assembler);
+void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kEqual, assembler);
 }
 
 
 // TestNotEqual <src>
 //
 // Test if the value in the <src> register is not equal to the accumulator.
-void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
+void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kNotEqual, assembler);
 }
 
 
 // TestEqualStrict <src>
 //
 // Test if the value in the <src> register is strictly equal to the accumulator.
-void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
+void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictEqual, assembler);
 }
 
 
@@ -1201,25 +1163,24 @@
 //
 // Test if the value in the <src> register is not strictly equal to the
 // accumulator.
-void Interpreter::DoTestNotEqualStrict(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
+void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kStrictNotEqual, assembler);
 }
 
 
 // TestLessThan <src>
 //
 // Test if the value in the <src> register is less than the accumulator.
-void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
+void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThan, assembler);
 }
 
 
 // TestGreaterThan <src>
 //
 // Test if the value in the <src> register is greater than the accumulator.
-void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
+void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThan, assembler);
 }
 
 
@@ -1227,9 +1188,8 @@
 //
 // Test if the value in the <src> register is less than or equal to the
 // accumulator.
-void Interpreter::DoTestLessThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
+void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kLessThanOrEqual, assembler);
 }
 
 
@@ -1237,9 +1197,8 @@
 //
 // Test if the value in the <src> register is greater than or equal to the
 // accumulator.
-void Interpreter::DoTestGreaterThanOrEqual(
-    compiler::InterpreterAssembler* assembler) {
-  DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
+void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
+  DoBinaryOp(Runtime::kGreaterThanOrEqual, assembler);
 }
 
 
@@ -1247,7 +1206,7 @@
 //
 // Test if the object referenced by the register operand is a property of the
 // object referenced by the accumulator.
-void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kHasProperty, assembler);
 }
 
@@ -1256,7 +1215,7 @@
 //
 // Test if the object referenced by the <src> register is an instance of the type
 // referenced by the accumulator.
-void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
   DoBinaryOp(Runtime::kInstanceOf, assembler);
 }
 
@@ -1264,9 +1223,10 @@
 // ToName
 //
 // Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1275,9 +1235,10 @@
 // ToNumber
 //
 // Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1286,9 +1247,10 @@
 // ToObject
 //
 // Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1297,7 +1259,7 @@
 // Jump <imm8>
 //
 // Jump by number of bytes represented by the immediate operand |imm8|.
-void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJump(InterpreterAssembler* assembler) {
   Node* relative_jump = __ BytecodeOperandImm(0);
   __ Jump(relative_jump);
 }
@@ -1306,7 +1268,7 @@
 // JumpConstant <idx8>
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
-void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1318,8 +1280,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the
 // constant pool.
-void Interpreter::DoJumpConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
   DoJumpConstant(assembler);
 }
 
@@ -1328,7 +1289,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
-void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
@@ -1340,8 +1301,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1355,8 +1315,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfTrueConstant(assembler);
 }
 
@@ -1365,7 +1324,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
-void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
@@ -1377,8 +1336,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
@@ -1392,8 +1350,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfFalseConstant(assembler);
 }
 
@@ -1402,11 +1359,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanTrue(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1419,10 +1376,11 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1437,7 +1395,7 @@
 // if the object referenced by the accumulator is true when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanTrueConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanTrueConstant(assembler);
 }
 
@@ -1446,11 +1404,11 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanFalse(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* relative_jump = __ BytecodeOperandImm(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1463,10 +1421,11 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstant(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
+  Node* context = __ GetContext();
   Node* to_boolean_value =
-      __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+      __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
   Node* relative_jump = __ SmiUntag(constant);
@@ -1481,7 +1440,7 @@
 // if the object referenced by the accumulator is false when the object is cast
 // to boolean.
 void Interpreter::DoJumpIfToBooleanFalseConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfToBooleanFalseConstant(assembler);
 }
 
@@ -1490,7 +1449,7 @@
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* relative_jump = __ BytecodeOperandImm(0);
@@ -1502,8 +1461,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* index = __ BytecodeOperandIdx(0);
@@ -1517,17 +1475,15 @@
 //
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
   DoJumpIfNullConstant(assembler);
 }
 
-
-// jumpifundefined <imm8>
+// JumpIfUndefined <imm8>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1540,8 +1496,7 @@
 //
 // Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstant(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
@@ -1557,13 +1512,44 @@
 // Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstantWide(
-    compiler::InterpreterAssembler* assembler) {
+    InterpreterAssembler* assembler) {
   DoJumpIfUndefinedConstant(assembler);
 }
 
+// JumpIfNotHole <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is not the hole.
+void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* relative_jump = __ BytecodeOperandImm(0);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is the hole constant.
+void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* constant = __ LoadConstantPoolEntry(index);
+  Node* relative_jump = __ SmiUntag(constant);
+  __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the hole constant.
+void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
+  DoJumpIfNotHoleConstant(assembler);
+}
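
The JumpIfNotHole family compares the accumulator against the hole sentinel by identity and takes the jump only on inequality. A sketch of that branch logic (the sentinel-by-address scheme below is illustrative, not V8's object model):

#include <cassert>

struct Value {};
const Value kTheHole{};  // sentinel, compared by address

// Returns the next bytecode offset: jump when the accumulator is not the
// hole, otherwise fall through to the following bytecode.
int NextOffset(const Value* acc, int current, int relative_jump) {
  return (acc != &kTheHole) ? current + relative_jump : current + 1;
}

int main() {
  Value v;
  assert(NextOffset(&v, 10, 5) == 15);         // not the hole: jump taken
  assert(NextOffset(&kTheHole, 10, 5) == 11);  // the hole: fall through
}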
 
 void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
-                                  compiler::InterpreterAssembler* assembler) {
+                                  InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant_elements = __ LoadConstantPoolEntry(index);
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1571,7 +1557,8 @@
   Node* flags_raw = __ BytecodeOperandImm(2);
   Node* flags = __ SmiTag(flags_raw);
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(function_id, closure, literal_index,
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(function_id, context, closure, literal_index,
                                 constant_elements, flags);
   __ SetAccumulator(result);
   __ Dispatch();
@@ -1582,8 +1569,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1592,8 +1578,7 @@
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
 }
 
@@ -1602,8 +1587,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1612,8 +1596,7 @@
 //
 // Creates an array literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
 }
 
@@ -1622,8 +1605,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteral(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1632,8 +1614,7 @@
 //
 // Creates an object literal for literal index <literal_idx> with flags <flags>
 // and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
   DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
 }
 
@@ -1642,15 +1623,16 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
   Node* tenured_raw = __ BytecodeOperandImm(1);
   Node* tenured = __ SmiTag(tenured_raw);
+  Node* context = __ GetContext();
   Node* result =
-      __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+      __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1660,8 +1642,7 @@
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
   return DoCreateClosure(assembler);
 }
 
@@ -1669,10 +1650,11 @@
 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
-void Interpreter::DoCreateMappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1681,21 +1663,56 @@
 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
-void Interpreter::DoCreateUnmappedArguments(
-    compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
+// CreateRestParameter
+//
+// Creates a new rest parameter array.
+void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+  Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+  Node* target = __ HeapConstant(callable.code());
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* context = __ GetContext();
+  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// StackCheck
+//
+// Performs a stack guard check.
+void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
+  __ StackCheck();
+  __ Dispatch();
+}
 
 // Throw
 //
 // Throws the exception in the accumulator.
-void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoThrow(InterpreterAssembler* assembler) {
   Node* exception = __ GetAccumulator();
-  __ CallRuntime(Runtime::kThrow, exception);
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kThrow, context, exception);
+  // We shouldn't ever return from a throw.
+  __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
+// ReThrow
+//
+// Re-throws the exception in the accumulator.
+void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
+  Node* exception = __ GetAccumulator();
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   __ Abort(kUnexpectedReturnFromThrow);
 }
@@ -1704,59 +1721,105 @@
 // Return
 //
 // Return the value in the accumulator.
-void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
-  __ Return();
+void Interpreter::DoReturn(InterpreterAssembler* assembler) {
+  __ InterpreterReturn();
 }
 
+// Debugger
+//
+// Call runtime to handle a debugger statement.
+void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
+  Node* context = __ GetContext();
+  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+  __ Dispatch();
+}
 
-// ForInPrepare <cache_type> <cache_array> <cache_length>
+// DebugBreak
+//
+// Call runtime to handle a debug break.
+#define DEBUG_BREAK(Name, ...)                                              \
+  void Interpreter::Do##Name(InterpreterAssembler* assembler) {             \
+    Node* context = __ GetContext();                                        \
+    Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
+    __ DispatchToBytecodeHandler(original_handler);                         \
+  }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
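
The DEBUG_BREAK block is a classic X-macro: DEBUG_BREAK_BYTECODE_LIST stamps out one Do##Name handler per listed bytecode. A self-contained sketch of the pattern (the two-entry list and handler bodies are hypothetical):

#include <cstdio>

#define MY_BYTECODE_LIST(V) \
  V(Add)                    \
  V(Sub)

// Expands to one DoAdd/DoSub definition per list entry.
#define DEFINE_HANDLER(Name) \
  void Do##Name() { std::printf("handler %s\n", #Name); }
MY_BYTECODE_LIST(DEFINE_HANDLER)
#undef DEFINE_HANDLER

int main() {
  DoAdd();
  DoSub();
}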
+
+// ForInPrepare <cache_info_triple>
 //
 // Returns state for for..in loop execution based on the object in the
-// accumulator. The registers |cache_type|, |cache_array|, and
-// |cache_length| represent output parameters.
-void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
   Node* object = __ GetAccumulator();
-  Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+  Node* context = __ GetContext();
+  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
+
+  // Set output registers:
+  //   0 == cache_type, 1 == cache_array, 2 == cache_length
+  Node* output_register = __ BytecodeOperandReg(0);
   for (int i = 0; i < 3; i++) {
-    // 0 == cache_type, 1 == cache_array, 2 == cache_length
-    Node* cache_info = __ LoadFixedArrayElement(result, i);
-    Node* cache_info_reg = __ BytecodeOperandReg(i);
-    __ StoreRegister(cache_info, cache_info_reg);
+    Node* cache_info = __ Projection(i, result_triple);
+    __ StoreRegister(cache_info, output_register);
+    output_register = __ NextRegister(output_register);
   }
+  __ Dispatch();
+}
+
+
+// ForInPrepareWide <cache_info_triple>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
+  DoForInPrepare(assembler);
+}
+
+
+// ForInNext <receiver> <index> <cache_info_pair>
+//
+// Returns the next enumerable property in the accumulator.
+void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
+  Node* receiver_reg = __ BytecodeOperandReg(0);
+  Node* receiver = __ LoadRegister(receiver_reg);
+  Node* index_reg = __ BytecodeOperandReg(1);
+  Node* index = __ LoadRegister(index_reg);
+  Node* cache_type_reg = __ BytecodeOperandReg(2);
+  Node* cache_type = __ LoadRegister(cache_type_reg);
+  Node* cache_array_reg = __ NextRegister(cache_type_reg);
+  Node* cache_array = __ LoadRegister(cache_array_reg);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
+                                cache_array, cache_type, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
 
-// ForInNext <receiver> <cache_type> <cache_array> <index>
+// ForInNextWide <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the accumulator.
-void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
-  Node* receiver_reg = __ BytecodeOperandReg(0);
-  Node* receiver = __ LoadRegister(receiver_reg);
-  Node* cache_type_reg = __ BytecodeOperandReg(1);
-  Node* cache_type = __ LoadRegister(cache_type_reg);
-  Node* cache_array_reg = __ BytecodeOperandReg(2);
-  Node* cache_array = __ LoadRegister(cache_array_reg);
-  Node* index_reg = __ BytecodeOperandReg(3);
-  Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
-                                cache_type, index);
-  __ SetAccumulator(result);
-  __ Dispatch();
+void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
+  return DoForInNext(assembler);
 }
 
 
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);
-  Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+  Node* context = __ GetContext();
+  Node* result =
+      __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1766,11 +1829,12 @@
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
-void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
   // TODO(oth): Implement directly rather than making a runtime call.
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
-  Node* result = __ CallRuntime(Runtime::kForInStep, index);
+  Node* context = __ GetContext();
+  Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
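
Taken together, ForInPrepare / ForInNext / ForInDone / ForInStep implement the for..in iteration protocol: prepare an enumeration cache, test the index against cache_length, fetch the next key, bump the index. A sketch of that loop shape over a plain vector (V8 iterates maps and enum caches, not vectors):

#include <cassert>
#include <string>
#include <vector>

int main() {
  // ForInPrepare: cache_array and cache_length for the enumerable keys.
  std::vector<std::string> cache_array = {"a", "b", "c"};
  int cache_length = static_cast<int>(cache_array.size());

  std::vector<std::string> visited;
  for (int index = 0;
       index != cache_length;  // ForInDone
       index += 1) {           // ForInStep
    visited.push_back(cache_array[index]);  // ForInNext
  }
  assert(visited.size() == 3 && visited[0] == "a");
}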
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index ef9b5d1..e02e914 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -21,91 +21,113 @@
 class Callable;
 class CompilationInfo;
 
-namespace compiler {
-class InterpreterAssembler;
-}
-
 namespace interpreter {
 
+class InterpreterAssembler;
+
 class Interpreter {
  public:
   explicit Interpreter(Isolate* isolate);
   virtual ~Interpreter() {}
 
-  // Creates an uninitialized interpreter handler table, where each handler
-  // points to the Illegal builtin.
-  static Handle<FixedArray> CreateUninitializedInterpreterTable(
-      Isolate* isolate);
-
-  // Initializes the interpreter.
+  // Initializes the interpreter dispatch table.
   void Initialize();
 
+  // Returns the interrupt budget which should be used for the profiler counter.
+  static int InterruptBudget();
+
   // Generate bytecode for |info|.
   static bool MakeBytecode(CompilationInfo* info);
 
+  // Returns the bytecode handler for |bytecode|.
+  Code* GetBytecodeHandler(Bytecode bytecode);
+
+  // GC support.
+  void IterateDispatchTable(ObjectVisitor* v);
+
+  void TraceCodegen(Handle<Code> code, const char* name);
+
+  Address dispatch_table_address() {
+    return reinterpret_cast<Address>(&dispatch_table_[0]);
+  }
+
  private:
 // Bytecode handler generator functions.
 #define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
-  void Do##Name(compiler::InterpreterAssembler* assembler);
+  void Do##Name(InterpreterAssembler* assembler);
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
   // Generates code to perform the binary operations via |function_id|.
   void DoBinaryOp(Runtime::FunctionId function_id,
-                  compiler::InterpreterAssembler* assembler);
+                  InterpreterAssembler* assembler);
 
   // Generates code to perform the count operations via |function_id|.
   void DoCountOp(Runtime::FunctionId function_id,
-                 compiler::InterpreterAssembler* assembler);
+                 InterpreterAssembler* assembler);
 
   // Generates code to perform the comparison operation associated with
   // |compare_op|.
-  void DoCompareOp(Token::Value compare_op,
-                   compiler::InterpreterAssembler* assembler);
+  void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
 
   // Generates code to load a constant from the constant pool.
-  void DoLoadConstant(compiler::InterpreterAssembler* assembler);
+  void DoLoadConstant(InterpreterAssembler* assembler);
 
   // Generates code to perform a global load via |ic|.
-  void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a global store via |ic|.
-  void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a named property load via |ic|.
-  void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property load via |ic|.
-  void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a named property store via |ic|.
-  void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a keyed property store via |ic|.
-  void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+  void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
 
   // Generates code to perform a JS call.
-  void DoJSCall(compiler::InterpreterAssembler* assembler);
+  void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
+
+  // Generates code to perform a runtime call.
+  void DoCallRuntimeCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a runtime call returning a pair.
+  void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a JS runtime call.
+  void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
+
+  // Generates code to perform a constructor call.
+  void DoCallConstruct(InterpreterAssembler* assembler);
 
   // Generates code to create a literal via |function_id|.
   void DoCreateLiteral(Runtime::FunctionId function_id,
-                       compiler::InterpreterAssembler* assembler);
+                       InterpreterAssembler* assembler);
 
   // Generates code to perform delete via function_id.
   void DoDelete(Runtime::FunctionId function_id,
-                compiler::InterpreterAssembler* assembler);
+                InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot load via |function_id|.
   void DoLoadLookupSlot(Runtime::FunctionId function_id,
-                        compiler::InterpreterAssembler* assembler);
+                        InterpreterAssembler* assembler);
 
   // Generates code to perform a lookup slot store depending on |language_mode|.
   void DoStoreLookupSlot(LanguageMode language_mode,
-                         compiler::InterpreterAssembler* assembler);
+                         InterpreterAssembler* assembler);
 
-  bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
+  bool IsDispatchTableInitialized();
+
+  static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
 
   Isolate* isolate_;
+  Code* dispatch_table_[kDispatchTableSize];
 
   DISALLOW_COPY_AND_ASSIGN(Interpreter);
 };
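
The new dispatch_table_ member replaces the heap-allocated handler FixedArray with a flat Code* array indexed by bytecode value and sized from Bytecode::kLast. A sketch of the layout with function pointers standing in for Code* (the enum and handlers below are hypothetical):

#include <cassert>
#include <cstddef>
#include <cstdint>

enum class Bytecode : uint8_t { kAdd, kSub, kReturn, kLast = kReturn };

using Handler = int (*)(int, int);
constexpr size_t kDispatchTableSize =
    static_cast<size_t>(Bytecode::kLast) + 1;

int HandleAdd(int a, int b) { return a + b; }
int HandleSub(int a, int b) { return a - b; }
int HandleReturn(int a, int) { return a; }

int main() {
  // One slot per bytecode; dispatch is a plain indexed load.
  Handler dispatch_table[kDispatchTableSize] = {HandleAdd, HandleSub,
                                                HandleReturn};
  Handler h = dispatch_table[static_cast<size_t>(Bytecode::kSub)];
  assert(h(5, 3) == 2);
}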
diff --git a/src/interpreter/register-translator.cc b/src/interpreter/register-translator.cc
new file mode 100644
index 0000000..3eba42f
--- /dev/null
+++ b/src/interpreter/register-translator.cc
@@ -0,0 +1,173 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/register-translator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+RegisterTranslator::RegisterTranslator(RegisterMover* mover)
+    : mover_(mover),
+      emitting_moves_(false),
+      window_registers_count_(0),
+      output_moves_count_(0) {}
+
+void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
+                                                 uint32_t* raw_operands,
+                                                 int raw_operand_count) {
+  DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
+  if (!emitting_moves_) {
+    emitting_moves_ = true;
+    DCHECK_EQ(window_registers_count_, 0);
+    int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
+    for (int i = 0; i < raw_operand_count; i++) {
+      if ((register_bitmap & (1 << i)) == 0) {
+        continue;
+      }
+      Register in_reg = Register::FromRawOperand(raw_operands[i]);
+      Register out_reg = TranslateAndMove(bytecode, i, in_reg);
+      raw_operands[i] = out_reg.ToRawOperand();
+    }
+    window_registers_count_ = 0;
+    emitting_moves_ = false;
+  } else {
+    // When the register translator is translating registers, it causes
+    // the bytecode generator to emit moves on its behalf. This path is
+    // reached by those moves.
+    DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
+           Register::FromRawOperand(raw_operands[0]).is_valid() &&
+           Register::FromRawOperand(raw_operands[1]).is_valid());
+  }
+}
+
+Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
+                                              int operand_index, Register reg) {
+  if (FitsInReg8Operand(reg)) {
+    return reg;
+  }
+
+  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+  OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+  if (operand_size == OperandSize::kShort) {
+    CHECK(FitsInReg16Operand(reg));
+    return Translate(reg);
+  }
+
+  CHECK((operand_type == OperandType::kReg8 ||
+         operand_type == OperandType::kRegOut8) &&
+        RegisterIsMovableToWindow(bytecode, operand_index));
+  Register translated_reg = Translate(reg);
+  Register window_reg(kTranslationWindowStart + window_registers_count_);
+  window_registers_count_ += 1;
+  if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+    DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
+    mover()->MoveRegisterUntranslated(translated_reg, window_reg);
+  } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+    DCHECK_LT(output_moves_count_, kTranslationWindowLength);
+    output_moves_[output_moves_count_] =
+        std::make_pair(window_reg, translated_reg);
+    output_moves_count_ += 1;
+  } else {
+    UNREACHABLE();
+  }
+  return window_reg;
+}
+
+// static
+bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
+                                                   int operand_index) {
+  // By design, we only support moving individual registers. There
+  // should be wide variants of such bytecodes instead to avoid the
+  // need for a large translation window.
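+  // In particular, a Reg8 / RegOut8 operand that is immediately followed
+  // by a RegCount operand forms the base of a register range, and a range
+  // cannot be moved through the window one register at a time.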
+  OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+  if (operand_type != OperandType::kReg8 &&
+      operand_type != OperandType::kRegOut8) {
+    return false;
+  } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
+    return true;
+  } else {
+    OperandType next_operand_type =
+        Bytecodes::GetOperandType(bytecode, operand_index + 1);
+    return (next_operand_type != OperandType::kRegCount8 &&
+            next_operand_type != OperandType::kRegCount16);
+  }
+}
+
+void RegisterTranslator::TranslateOutputRegisters() {
+  if (!emitting_moves_) {
+    emitting_moves_ = true;
+    while (output_moves_count_ > 0) {
+      output_moves_count_ -= 1;
+      mover()->MoveRegisterUntranslated(
+          output_moves_[output_moves_count_].first,
+          output_moves_[output_moves_count_].second);
+    }
+    emitting_moves_ = false;
+  }
+}
+
+// static
+Register RegisterTranslator::Translate(Register reg) {
+  if (reg.index() >= kTranslationWindowStart) {
+    return Register(reg.index() + kTranslationWindowLength);
+  } else {
+    return reg;
+  }
+}
+
+// static
+bool RegisterTranslator::InTranslationWindow(Register reg) {
+  return (reg.index() >= kTranslationWindowStart &&
+          reg.index() <= kTranslationWindowLimit);
+}
+
+// static
+Register RegisterTranslator::UntranslateRegister(Register reg) {
+  if (reg.index() >= kTranslationWindowStart) {
+    return Register(reg.index() - kTranslationWindowLength);
+  } else {
+    return reg;
+  }
+}
+
+// static
+int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
+  return kTranslationWindowStart - reg.index();
+}
+
+// static
+bool RegisterTranslator::FitsInReg8Operand(Register reg) {
+  return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
+}
+
+// static
+bool RegisterTranslator::FitsInReg16Operand(Register reg) {
+  int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
+  return reg.is_short_operand() && reg.index() < max_index;
+}
+
+// static
+int RegisterTranslator::RegisterCountAdjustment(int register_count,
+                                                int parameter_count) {
+  if (register_count > kTranslationWindowStart) {
+    return kTranslationWindowLength;
+  } else if (parameter_count > 0) {
+    Register param0 = Register::FromParameterIndex(0, parameter_count);
+    if (!param0.is_byte_operand()) {
+      // TODO(oth): The number of parameters means translation is
+      // required, but the translation window location is such that
+      // some space is wasted. Hopefully this is a rare corner case,
+      // but we could relocate the window to limit waste.
+      return kTranslationWindowLimit + 1 - register_count;
+    }
+  }
+  return 0;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/register-translator.h b/src/interpreter/register-translator.h
new file mode 100644
index 0000000..b683a89
--- /dev/null
+++ b/src/interpreter/register-translator.h
@@ -0,0 +1,119 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RegisterMover;
+
+// A class that enables bytecodes with only byte-sized register operands
+// to access all registers in the two-byte space. Most bytecodes use few
+// registers, so space can be saved if most bytecodes with register
+// operands take only byte operands.
+//
+// To reach the wider register space, a translation window is reserved in
+// the byte-addressable space specifically for copying registers into and
+// out of before a bytecode is emitted. The translation window occupies
+// the last register slots at the top of the byte-addressable range.
+//
+// Because of the translation window, any register that naturally lies
+// at or above the start of the translation window has its register
+// index incremented by the window width before it is emitted.
+//
+// This class does not support moving ranges of registers to and from
+// the translation window. It would be straightforward to add support
+// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
+// these would have two negative effects. The translation window would
+// need to be wider, further limiting the space for byte operands. And
+// every register in a range would need to be moved, consuming more
+// space in the bytecode array.
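+//
+// As an illustrative sketch of the mechanism: when a bytecode with a
+// byte-sized register operand names a register beyond the byte range,
+// the translator emits a move copying the (translated) wide register
+// into a window register, emits the bytecode with the window register
+// instead, and, for output operands, moves the value back out of the
+// window once the bytecode has been emitted.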
+class RegisterTranslator final {
+ public:
+  explicit RegisterTranslator(RegisterMover* mover);
+
+  // Translate and re-write the register operands that are inputs
+  // to |bytecode| when it is about to be emitted.
+  void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
+                               int raw_operand_count);
+
+  // Translate and re-write the register operands that are outputs
+  // from |bytecode| when it has just been output.
+  void TranslateOutputRegisters();
+
+  // Returns true if |reg| is in the translation window.
+  static bool InTranslationWindow(Register reg);
+
+  // Returns the original register for a translated register value.
+  static Register UntranslateRegister(Register reg);
+
+  // Returns the distance in registers between the translation window
+  // start and |reg|. The result is negative when |reg| is above the
+  // start of the translation window.
+  static int DistanceToTranslationWindow(Register reg);
+
+  // Returns true if |reg| can be represented as an 8-bit operand
+  // after translation.
+  static bool FitsInReg8Operand(Register reg);
+
+  // Returns true if |reg| can be represented as a 16-bit operand
+  // after translation.
+  static bool FitsInReg16Operand(Register reg);
+
+  // Returns the increment to the register count that is necessary if
+  // the register or parameter count indicates the translation window
+  // is required.
+  static int RegisterCountAdjustment(int register_count, int parameter_count);
+
+ private:
+  static const int kTranslationWindowLength = 4;
+  static const int kTranslationWindowLimit = -kMinInt8;
+  static const int kTranslationWindowStart =
+      kTranslationWindowLimit - kTranslationWindowLength + 1;
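+  // As a worked example of the arithmetic (assuming kMinInt8 == -128):
+  // kTranslationWindowLimit == 128 and kTranslationWindowStart == 125, so
+  // the window occupies register indices 125..128 and Translate() shifts
+  // any register at or above index 125 up by kTranslationWindowLength.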
+
+  Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
+  static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
+
+  static Register Translate(Register reg);
+
+  RegisterMover* mover() const { return mover_; }
+
+  // Entity to perform register moves necessary to translate registers
+  // and ensure reachability.
+  RegisterMover* mover_;
+
+  // Flag to avoid re-entrancy when emitting move bytecodes for
+  // translation.
+  bool emitting_moves_;
+
+  // Number of window registers in use.
+  int window_registers_count_;
+
+  // Pending output register moves, emitted by TranslateOutputRegisters.
+  std::pair<Register, Register> output_moves_[kTranslationWindowLength];
+  int output_moves_count_;
+};
+
+// Interface for RegisterTranslator helper class that will emit
+// register move bytecodes at the translator's behest.
+class RegisterMover {
+ public:
+  virtual ~RegisterMover() {}
+
+  // Move register |from| to register |to| with no translation.
+  // Implementations of this method must validate both register
+  // operands; register moves with bad register values are a
+  // security hole.
+  virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
new file mode 100644
index 0000000..0b7c44e
--- /dev/null
+++ b/src/interpreter/source-position-table.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/source-position-table.h"
+
+#include "src/assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class IsStatementField : public BitField<bool, 0, 1> {};
+class SourcePositionField : public BitField<int, 1, 30> {};
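+// Together these pack one table entry into 31 bits: bit 0 holds the
+// is-statement flag and bits 1..30 hold the source position.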
+
+void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
+                                                      int source_position) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If a position has already been assigned to this bytecode offset,
+  // do not reassign a new statement position.
+  if (CodeOffsetHasPosition(offset)) return;
+  uint32_t encoded = IsStatementField::encode(true) |
+                     SourcePositionField::encode(source_position);
+  entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
+                                                       int source_position) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If a position has already been assigned to this bytecode offset,
+  // do not overwrite it with a new expression position.
+  if (CodeOffsetHasPosition(offset)) return;
+  uint32_t encoded = IsStatementField::encode(false) |
+                     SourcePositionField::encode(source_position);
+  entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
+  int offset = static_cast<int>(bytecode_offset);
+  // If we already added a source position table entry, but the bytecode array
+  // builder ended up not outputting a bytecode for the corresponding bytecode
+  // offset, we have to remove that entry.
+  if (CodeOffsetHasPosition(offset)) entries_.pop_back();
+}
+
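+// Flattens the entries into a FixedArray laid out as
+// [offset_0, encoded_0, offset_1, encoded_1, ...]; the iterator relies on
+// this pairwise layout (even length, advancing two slots at a time).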
+Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
+  int length = static_cast<int>(entries_.size());
+  Handle<FixedArray> table =
+      isolate_->factory()->NewFixedArray(length * 2, TENURED);
+  for (int i = 0; i < length; i++) {
+    table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
+    table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+  }
+  return table;
+}
+
+SourcePositionTableIterator::SourcePositionTableIterator(
+    BytecodeArray* bytecode_array)
+    : table_(bytecode_array->source_position_table()),
+      index_(0),
+      length_(table_->length()) {
+  DCHECK(table_->length() % 2 == 0);
+  Advance();
+}
+
+void SourcePositionTableIterator::Advance() {
+  if (index_ < length_) {
+    int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
+    // Bytecode offsets are in ascending order.
+    DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
+    bytecode_offset_ = new_bytecode_offset;
+    uint32_t source_position_and_type =
+        static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
+    is_statement_ = IsStatementField::decode(source_position_and_type);
+    source_position_ = SourcePositionField::decode(source_position_and_type);
+  }
+  index_ += 2;
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
new file mode 100644
index 0000000..336cf42
--- /dev/null
+++ b/src/interpreter/source-position-table.h
@@ -0,0 +1,82 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/handles.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class FixedArray;
+class Isolate;
+
+namespace interpreter {
+
+class SourcePositionTableBuilder {
+ public:
+  explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+      : isolate_(isolate), entries_(zone) {}
+
+  void AddStatementPosition(size_t bytecode_offset, int source_position);
+  void AddExpressionPosition(size_t bytecode_offset, int source_position);
+  void RevertPosition(size_t bytecode_offset);
+  Handle<FixedArray> ToFixedArray();
+
+ private:
+  struct Entry {
+    int bytecode_offset;
+    uint32_t source_position_and_type;
+  };
+
+  bool CodeOffsetHasPosition(int bytecode_offset) {
+    // Return whether bytecode offset already has a position assigned.
+    return entries_.size() > 0 &&
+           entries_.back().bytecode_offset == bytecode_offset;
+  }
+
+  Isolate* isolate_;
+  ZoneVector<Entry> entries_;
+};
+
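+// Decodes the source position table of a BytecodeArray, as produced by the
+// builder above. A minimal usage sketch:
+//   for (SourcePositionTableIterator it(bytecode_array); !it.done();
+//        it.Advance()) {
+//     // use it.bytecode_offset(), it.source_position(), it.is_statement()
+//   }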
+class SourcePositionTableIterator {
+ public:
+  explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+
+  void Advance();
+
+  int bytecode_offset() const {
+    DCHECK(!done());
+    return bytecode_offset_;
+  }
+  int source_position() const {
+    DCHECK(!done());
+    return source_position_;
+  }
+  bool is_statement() const {
+    DCHECK(!done());
+    return is_statement_;
+  }
+  bool done() const { return index_ > length_; }
+
+ private:
+  FixedArray* table_;
+  int index_;
+  int length_;
+  bool is_statement_;
+  int bytecode_offset_;
+  int source_position_;
+  DisallowHeapAllocation no_gc;
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
diff --git a/src/isolate.cc b/src/isolate.cc
index 4e42b43..8116f14 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -390,8 +390,9 @@
       }
       DCHECK(cursor + 4 <= elements->length());
 
-      Handle<Code> code = frames[i].code();
-      Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+      Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+
+      Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
       // The stack trace API should not expose receivers and function
       // objects on frames deeper than the top-most one with a strict
       // mode function.  The number of sloppy frames is stored as
@@ -405,7 +406,7 @@
       }
       elements->set(cursor++, *recv);
       elements->set(cursor++, *fun);
-      elements->set(cursor++, *code);
+      elements->set(cursor++, *abstract_code);
       elements->set(cursor++, *offset);
       frames_seen++;
     }
@@ -594,9 +595,9 @@
   if (maybe_code->IsSmi()) {
     return Smi::cast(maybe_code)->value();
   } else {
-    Code* code = Code::cast(maybe_code);
-    Address pc = code->address() + Smi::cast(elements->get(index + 3))->value();
-    return code->SourcePosition(pc);
+    AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
+    int code_offset = Smi::cast(elements->get(index + 3))->value();
+    return abstract_code->SourcePosition(code_offset);
   }
 }
 
@@ -661,7 +662,8 @@
       // Filter frames from other security contexts.
       if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
           !this->context()->HasSameSecurityTokenAs(fun->context())) continue;
-      int position = frames[i].code()->SourcePosition(frames[i].pc());
+      int position =
+          frames[i].abstract_code()->SourcePosition(frames[i].code_offset());
       Handle<JSObject> stack_frame =
           helper.NewStackFrameObject(fun, position, frames[i].is_constructor());
 
@@ -780,14 +782,6 @@
 }
 
 
-bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
-  if (name->IsSymbol()) {
-    return Handle<Symbol>::cast(name)->is_private();
-  }
-  return name.is_identical_to(factory()->hidden_string());
-}
-
-
 bool Isolate::MayAccess(Handle<Context> accessing_context,
                         Handle<JSObject> receiver) {
   DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
@@ -826,11 +820,11 @@
     if (!access_check_info) return false;
     Object* fun_obj = access_check_info->callback();
     callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
+    data = handle(access_check_info->data(), this);
     if (!callback) {
       fun_obj = access_check_info->named_callback();
       named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
       if (!named_callback) return false;
-      data = handle(access_check_info->data(), this);
     }
   }
 
@@ -841,7 +835,7 @@
     VMState<EXTERNAL> state(this);
     if (callback) {
       return callback(v8::Utils::ToLocal(accessing_context),
-                      v8::Utils::ToLocal(receiver));
+                      v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
     }
     Handle<Object> key = factory()->undefined_value();
     return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
@@ -1100,34 +1094,63 @@
     if (frame->is_optimized() && catchable_by_js) {
       OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
       int stack_slots = 0;  // Will contain stack slot count of frame.
-      offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
+      offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, nullptr);
       if (offset >= 0) {
         // Compute the stack pointer from the frame pointer. This ensures that
         // argument slots on the stack are dropped as returning would.
-        Address return_sp = frame->fp() -
-                            StandardFrameConstants::kFixedFrameSizeFromFp -
+        Address return_sp = frame->fp() +
+                            StandardFrameConstants::kFixedFrameSizeAboveFp -
                             stack_slots * kPointerSize;
 
         // Gather information from the frame.
         code = frame->LookupCode();
+        if (code->marked_for_deoptimization()) {
+          // If the target code is lazy deoptimized, we jump to the original
+          // return address, but we make a note that we are throwing, so that
+          // the deoptimizer can do the right thing.
+          offset = static_cast<int>(frame->pc() - code->entry());
+          set_deoptimizer_lazy_throw(true);
+        }
         handler_sp = return_sp;
         handler_fp = frame->fp();
         break;
       }
     }
 
+    // For interpreted frames we perform a range lookup in the handler table.
+    if (frame->is_interpreted() && catchable_by_js) {
+      InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
+      int context_reg = 0;  // Will contain register index holding context.
+      offset = js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
+      if (offset >= 0) {
+        // Patch the bytecode offset in the interpreted frame to reflect the
+        // position of the exception handler. The special builtin below will
+        // take care of continuing to dispatch at that position. Also restore
+        // the correct context for the handler from the interpreter register.
+        context = Context::cast(js_frame->GetInterpreterRegister(context_reg));
+        js_frame->PatchBytecodeOffset(static_cast<int>(offset));
+        offset = 0;
+
+        // Gather information from the frame.
+        code = *builtins()->InterpreterEnterBytecodeDispatch();
+        handler_sp = frame->sp();
+        handler_fp = frame->fp();
+        break;
+      }
+    }
+
     // For JavaScript frames we perform a range lookup in the handler table.
     if (frame->is_java_script() && catchable_by_js) {
       JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
-      int stack_slots = 0;  // Will contain operand stack depth of handler.
-      offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
+      int stack_depth = 0;  // Will contain operand stack depth of handler.
+      offset = js_frame->LookupExceptionHandlerInTable(&stack_depth, nullptr);
       if (offset >= 0) {
         // Compute the stack pointer from the frame pointer. This ensures that
         // operand stack slots are dropped for nested statements. Also restore
         // correct context for the handler which is pushed within the try-block.
         Address return_sp = frame->fp() -
                             StandardFrameConstants::kFixedFrameSizeFromFp -
-                            stack_slots * kPointerSize;
+                            stack_depth * kPointerSize;
         STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
         context = Context::cast(Memory::Object_at(return_sp - kPointerSize));
 
@@ -1175,10 +1198,8 @@
     // For JavaScript frames we perform a lookup in the handler table.
     if (frame->is_java_script()) {
       JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
-      int stack_slots = 0;  // The computed stack slot count is not used.
       HandlerTable::CatchPrediction prediction;
-      if (js_frame->LookupExceptionHandlerInTable(&stack_slots, &prediction) >
-          0) {
+      if (js_frame->LookupExceptionHandlerInTable(nullptr, &prediction) > 0) {
         // We are conservative with our prediction: try-finally is considered
         // to always rethrow, to meet the expectation of the debugger.
         if (prediction == HandlerTable::CAUGHT) return CAUGHT_BY_JAVASCRIPT;
@@ -1263,7 +1284,9 @@
     HandleScope scope(this);
     // Find code position if recorded in relocation info.
     JavaScriptFrame* frame = it.frame();
-    int pos = frame->LookupCode()->SourcePosition(frame->pc());
+    Code* code = frame->LookupCode();
+    int offset = static_cast<int>(frame->pc() - code->instruction_start());
+    int pos = frame->LookupCode()->SourcePosition(offset);
     Handle<Object> pos_obj(Smi::FromInt(pos), this);
     // Fetch function and receiver.
     Handle<JSFunction> fun(frame->function());
@@ -1298,7 +1321,7 @@
       List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
       it.frame()->Summarize(&frames);
       FrameSummary& summary = frames.last();
-      int pos = summary.code()->SourcePosition(summary.pc());
+      int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
       *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
       return true;
     }
@@ -1586,8 +1609,7 @@
   if (PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) return undefined;
   for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
-    int stack_slots = 0;  // The computed stack slot count is not used.
-    if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
+    if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
       // Throwing inside a Promise only leads to a reject if not caught by an
       // inner try-catch or try-finally.
       if (frame->function() == *promise_function) {
@@ -1732,7 +1754,6 @@
 #define TRACE_ISOLATE(tag)
 #endif
 
-
 Isolate::Isolate(bool enable_serializer)
     : embedder_data_(),
       entry_stack_(NULL),
@@ -1748,6 +1769,7 @@
       stub_cache_(NULL),
       code_aging_helper_(NULL),
       deoptimizer_data_(NULL),
+      deoptimizer_lazy_throw_(false),
       materialized_object_store_(NULL),
       capture_stack_trace_for_uncaught_exceptions_(false),
       stack_trace_for_uncaught_exceptions_frame_limit_(0),
@@ -1903,9 +1925,6 @@
   Sampler* sampler = logger_->sampler();
   if (sampler && sampler->IsActive()) sampler->Stop();
 
-  delete interpreter_;
-  interpreter_ = NULL;
-
   delete deoptimizer_data_;
   deoptimizer_data_ = NULL;
   builtins_.TearDown();
@@ -1919,13 +1938,17 @@
   delete basic_block_profiler_;
   basic_block_profiler_ = NULL;
 
+  delete heap_profiler_;
+  heap_profiler_ = NULL;
+
   heap_.TearDown();
   logger_->TearDown();
 
+  delete interpreter_;
+  interpreter_ = NULL;
+
   cancelable_task_manager()->CancelAndWait();
 
-  delete heap_profiler_;
-  heap_profiler_ = NULL;
   delete cpu_profiler_;
   cpu_profiler_ = NULL;
 
@@ -2376,6 +2399,11 @@
   turbo_statistics_ = nullptr;
   delete hstatistics_;
   hstatistics_ = nullptr;
+  if (FLAG_runtime_call_stats) {
+    OFStream os(stdout);
+    counters()->runtime_call_stats()->Print(os);
+    counters()->runtime_call_stats()->Reset();
+  }
 }
 
 
@@ -2483,8 +2511,37 @@
   return cell_reports_intact;
 }
 
+bool Isolate::IsArraySpeciesLookupChainIntact() {
+  if (!FLAG_harmony_species) return true;
+  // Note: It would be nice to have debug checks to make sure that the
+  // species protector is accurate, but this would be hard to do for most of
+  // what the protector stands for:
+  // - You'd need to traverse the heap to check that no Array instance has
+  //   a constructor property or a modified __proto__
+  // - To check that Array[Symbol.species] == Array, JS code has to execute,
+  //   but JS cannot be invoked in callstack overflow situations
+  // All that could be checked reliably is that
+  // Array.prototype.constructor == Array. Given that limitation, no check is
+  // done here. Instead, there are mjsunit tests (harmony/array-species*)
+  // which ensure that behavior is correct in various invalid-protector cases.
+
+  PropertyCell* species_cell = heap()->species_protector();
+  return species_cell->value()->IsSmi() &&
+         Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+}
+
+void Isolate::InvalidateArraySpeciesProtector() {
+  if (!FLAG_harmony_species) return;
+  DCHECK(factory()->species_protector()->value()->IsSmi());
+  DCHECK(IsArraySpeciesLookupChainIntact());
+  PropertyCell::SetValueWithInvalidation(
+      factory()->species_protector(),
+      handle(Smi::FromInt(kArrayProtectorInvalid), this));
+  DCHECK(!IsArraySpeciesLookupChainIntact());
+}
 
 void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
+  DisallowHeapAllocation no_gc;
   if (IsFastArrayConstructorPrototypeChainIntact() &&
       object->map()->is_prototype_map()) {
     Object* context = heap()->native_contexts_list();
@@ -2494,6 +2551,7 @@
               *object ||
           current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
               *object) {
+        CountUsage(v8::Isolate::UseCounterFeature::kArrayProtectorDirtied);
         PropertyCell::SetValueWithInvalidation(
             factory()->array_protector(),
             handle(Smi::FromInt(kArrayProtectorInvalid), this));
@@ -2580,6 +2638,31 @@
 }
 
 
+void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
+  for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+    if (callback == before_call_entered_callbacks_.at(i)) return;
+  }
+  before_call_entered_callbacks_.Add(callback);
+}
+
+
+void Isolate::RemoveBeforeCallEnteredCallback(
+    BeforeCallEnteredCallback callback) {
+  for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+    if (callback == before_call_entered_callbacks_.at(i)) {
+      before_call_entered_callbacks_.Remove(i);
+    }
+  }
+}
+
+
+void Isolate::FireBeforeCallEnteredCallback() {
+  for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+    before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+  }
+}
+
+
 void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
   for (int i = 0; i < call_completed_callbacks_.length(); i++) {
     if (callback == call_completed_callbacks_.at(i)) return;
@@ -2605,10 +2688,10 @@
   if (!handle_scope_implementer()->CallDepthIsZero()) return;
   if (run_microtasks) RunMicrotasks();
   // Fire callbacks.  Increase call depth to prevent recursive callbacks.
-  v8::Isolate::SuppressMicrotaskExecutionScope suppress(
-      reinterpret_cast<v8::Isolate*>(this));
+  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
+  v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
   for (int i = 0; i < call_completed_callbacks_.length(); i++) {
-    call_completed_callbacks_.at(i)();
+    call_completed_callbacks_.at(i)(isolate);
   }
 }
 
diff --git a/src/isolate.h b/src/isolate.h
index 40c8157..2d74dc4 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -683,8 +683,6 @@
   // set.
   bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
 
-  bool IsInternallyUsedPropertyName(Handle<Object> name);
-
   void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
   void ReportFailedAccessCheck(Handle<JSObject> receiver);
 
@@ -820,6 +818,10 @@
   StubCache* stub_cache() { return stub_cache_; }
   CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
   DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+  bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
+  void set_deoptimizer_lazy_throw(bool value) {
+    deoptimizer_lazy_throw_ = value;
+  }
   ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
   MaterializedObjectStore* materialized_object_store() {
     return materialized_object_store_;
@@ -891,7 +893,7 @@
 
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       interp_canonicalize_mapping() {
-    return &interp_canonicalize_mapping_;
+    return &regexp_macro_assembler_canonicalize_;
   }
 
   Debug* debug() { return debug_; }
@@ -958,6 +960,7 @@
   static const int kArrayProtectorInvalid = 0;
 
   bool IsFastArrayConstructorPrototypeChainIntact();
+  bool IsArraySpeciesLookupChainIntact();
 
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
@@ -973,6 +976,7 @@
   void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
     UpdateArrayProtectorOnSetElement(object);
   }
+  void InvalidateArraySpeciesProtector();
 
   // Returns true if array is the initial array prototype in any native context.
   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1053,6 +1057,10 @@
   void RemoveCallCompletedCallback(CallCompletedCallback callback);
   void FireCallCompletedCallback();
 
+  void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+  void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+  void FireBeforeCallEnteredCallback();
+
   void SetPromiseRejectCallback(PromiseRejectCallback callback);
   void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
                            v8::PromiseRejectEvent event);
@@ -1218,6 +1226,7 @@
   StubCache* stub_cache_;
   CodeAgingHelper* code_aging_helper_;
   DeoptimizerData* deoptimizer_data_;
+  bool deoptimizer_lazy_throw_;
   MaterializedObjectStore* materialized_object_store_;
   ThreadLocalTop thread_local_top_;
   bool capture_stack_trace_for_uncaught_exceptions_;
@@ -1245,7 +1254,6 @@
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
   DateCache* date_cache_;
-  unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   CallInterfaceDescriptorData* call_descriptor_data_;
   base::RandomNumberGenerator* random_number_generator_;
 
@@ -1316,6 +1324,9 @@
   int next_unique_sfi_id_;
 #endif
 
+  // List of callbacks before a Call starts execution.
+  List<BeforeCallEnteredCallback> before_call_entered_callbacks_;
+
   // List of callbacks when a Call completes.
   List<CallCompletedCallback> call_completed_callbacks_;
 
diff --git a/src/js/array.js b/src/js/array.js
index f9cf161..0a5e283 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -12,7 +12,6 @@
 // Imports
 
 var AddIndexedProperty;
-var FLAG_harmony_tolength;
 var FLAG_harmony_species;
 var GetIterator;
 var GetMethod;
@@ -28,7 +27,6 @@
 var ObserveBeginPerformSplice;
 var ObserveEndPerformSplice;
 var ObserveEnqueueSpliceRecord;
-var SameValueZero;
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
 var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
 
@@ -44,11 +42,9 @@
   ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
   ObserveEndPerformSplice = from.ObserveEndPerformSplice;
   ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
-  SameValueZero = from.SameValueZero;
 });
 
 utils.ImportFromExperimental(function(from) {
-  FLAG_harmony_tolength = from.FLAG_harmony_tolength;
   FLAG_harmony_species = from.FLAG_harmony_species;
 });
 
@@ -213,8 +209,6 @@
         elements[elements_length++] = e;
       }
       elements.length = elements_length;
-      var result = %_FastOneByteArrayJoin(elements, '');
-      if (!IS_UNDEFINED(result)) return result;
       return %StringBuilderConcat(elements, elements_length, '');
     }
     // Non-empty separator case.
@@ -237,9 +231,6 @@
         elements[i] = e;
       }
     }
-    var result = %_FastOneByteArrayJoin(elements, separator);
-    if (!IS_UNDEFINED(result)) return result;
-
     return %StringBuilderJoin(elements, length, separator);
   } finally {
     // Make sure to remove the last element of the visited array no
@@ -431,7 +422,7 @@
 
 
 function InnerArrayToLocaleString(array, length) {
-  var len = TO_LENGTH_OR_UINT32(length);
+  var len = TO_LENGTH(length);
   if (len === 0) return "";
   return Join(array, len, ',', ConvertToLocaleString);
 }
@@ -451,9 +442,6 @@
     separator = TO_STRING(separator);
   }
 
-  var result = %_FastOneByteArrayJoin(array, separator);
-  if (!IS_UNDEFINED(result)) return result;
-
   // Fast case for one-element arrays.
   if (length === 1) {
     var e = array[0];
@@ -469,7 +457,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
 
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
 
   return InnerArrayJoin(separator, array, length);
 }
@@ -498,7 +486,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
 
   var array = TO_OBJECT(this);
-  var n = TO_LENGTH_OR_UINT32(array.length);
+  var n = TO_LENGTH(array.length);
   if (n == 0) {
     array.length = n;
     return;
@@ -516,13 +504,13 @@
 
 
 function ObservedArrayPush() {
-  var n = TO_LENGTH_OR_UINT32(this.length);
-  var m = %_ArgumentsLength();
+  var n = TO_LENGTH(this.length);
+  var m = arguments.length;
 
   try {
     ObserveBeginPerformSplice(this);
     for (var i = 0; i < m; i++) {
-      this[i+n] = %_Arguments(i);
+      this[i+n] = arguments[i];
     }
     var new_length = n + m;
     this.length = new_length;
@@ -544,8 +532,8 @@
     return ObservedArrayPush.apply(this, arguments);
 
   var array = TO_OBJECT(this);
-  var n = TO_LENGTH_OR_UINT32(array.length);
-  var m = %_ArgumentsLength();
+  var n = TO_LENGTH(array.length);
+  var m = arguments.length;
 
   // It appears that there is no enforced, absolute limit on the number of
   // arguments, but it would surely blow the stack to use 2**30 or more.
@@ -557,7 +545,7 @@
   }
 
   for (var i = 0; i < m; i++) {
-    array[i+n] = %_Arguments(i);
+    array[i+n] = arguments[i];
   }
 
   var new_length = n + m;
@@ -650,7 +638,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
 
   var array = TO_OBJECT(this);
-  var len = TO_LENGTH_OR_UINT32(array.length);
+  var len = TO_LENGTH(array.length);
   var isArray = IS_ARRAY(array);
 
   if (UseSparseVariant(array, len, isArray, len)) {
@@ -685,7 +673,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
 
   var array = TO_OBJECT(this);
-  var len = TO_LENGTH_OR_UINT32(array.length);
+  var len = TO_LENGTH(array.length);
 
   if (len === 0) {
     array.length = 0;
@@ -712,14 +700,14 @@
 
 
 function ObservedArrayUnshift() {
-  var len = TO_LENGTH_OR_UINT32(this.length);
-  var num_arguments = %_ArgumentsLength();
+  var len = TO_LENGTH(this.length);
+  var num_arguments = arguments.length;
 
   try {
     ObserveBeginPerformSplice(this);
     SimpleMove(this, 0, 0, len, num_arguments);
     for (var i = 0; i < num_arguments; i++) {
-      this[i] = %_Arguments(i);
+      this[i] = arguments[i];
     }
     var new_length = len + num_arguments;
     this.length = new_length;
@@ -739,8 +727,8 @@
     return ObservedArrayUnshift.apply(this, arguments);
 
   var array = TO_OBJECT(this);
-  var len = TO_LENGTH_OR_UINT32(array.length);
-  var num_arguments = %_ArgumentsLength();
+  var len = TO_LENGTH(array.length);
+  var num_arguments = arguments.length;
 
   if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
       !%object_is_sealed(array)) {
@@ -750,7 +738,7 @@
   }
 
   for (var i = 0; i < num_arguments; i++) {
-    array[i] = %_Arguments(i);
+    array[i] = arguments[i];
   }
 
   var new_length = len + num_arguments;
@@ -763,7 +751,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
 
   var array = TO_OBJECT(this);
-  var len = TO_LENGTH_OR_UINT32(array.length);
+  var len = TO_LENGTH(array.length);
   var start_i = TO_INTEGER(start);
   var end_i = len;
 
@@ -833,8 +821,8 @@
 
 
 function ObservedArraySplice(start, delete_count) {
-  var num_arguments = %_ArgumentsLength();
-  var len = TO_LENGTH_OR_UINT32(this.length);
+  var num_arguments = arguments.length;
+  var len = TO_LENGTH(this.length);
   var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
   var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
                                            start_i);
@@ -852,9 +840,9 @@
     // place of the deleted elements.
     var i = start_i;
     var arguments_index = 2;
-    var arguments_length = %_ArgumentsLength();
+    var arguments_length = arguments.length;
     while (arguments_index < arguments_length) {
-      this[i++] = %_Arguments(arguments_index++);
+      this[i++] = arguments[arguments_index++];
     }
     this.length = len - del_count + num_elements_to_add;
 
@@ -879,9 +867,9 @@
   if (%IsObserved(this))
     return ObservedArraySplice.apply(this, arguments);
 
-  var num_arguments = %_ArgumentsLength();
+  var num_arguments = arguments.length;
   var array = TO_OBJECT(this);
-  var len = TO_LENGTH_OR_UINT32(array.length);
+  var len = TO_LENGTH(array.length);
   var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
   var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
                                            start_i);
@@ -915,9 +903,9 @@
   // place of the deleted elements.
   var i = start_i;
   var arguments_index = 2;
-  var arguments_length = %_ArgumentsLength();
+  var arguments_length = arguments.length;
   while (arguments_index < arguments_length) {
-    array[i++] = %_Arguments(arguments_index++);
+    array[i++] = arguments[arguments_index++];
   }
   array.length = len - del_count + num_elements_to_add;
 
@@ -1068,7 +1056,7 @@
   var CopyFromPrototype = function CopyFromPrototype(obj, length) {
     var max = 0;
     for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
-      var indices = %GetArrayKeys(proto, length);
+      var indices = IS_PROXY(proto) ? length : %GetArrayKeys(proto, length);
       if (IS_NUMBER(indices)) {
         // It's an interval.
         var proto_length = indices;
@@ -1097,7 +1085,7 @@
   // elements in that range.
   var ShadowPrototypeElements = function(obj, from, to) {
     for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
-      var indices = %GetArrayKeys(proto, to);
+      var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
       if (IS_NUMBER(indices)) {
         // It's an interval.
         var proto_length = indices;
@@ -1217,7 +1205,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
 
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   return InnerArraySort(array, length, comparefn);
 }
 
@@ -1248,7 +1236,7 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
   var result = ArraySpeciesCreate(array, 0);
   return InnerArrayFilter(f, receiver, array, length, result);
@@ -1274,7 +1262,7 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   InnerArrayForEach(f, receiver, array, length);
 }
 
@@ -1301,7 +1289,7 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   return InnerArraySome(f, receiver, array, length);
 }
 
@@ -1325,7 +1313,7 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   return InnerArrayEvery(f, receiver, array, length);
 }
 
@@ -1336,7 +1324,7 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
   var result = ArraySpeciesCreate(array, length);
   var is_array = IS_ARRAY(array);
@@ -1411,7 +1399,7 @@
 function ArrayIndexOf(element, index) {
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
 
-  var length = TO_LENGTH_OR_UINT32(this.length);
+  var length = TO_LENGTH(this.length);
   return InnerArrayIndexOf(this, element, index, length);
 }
 
@@ -1469,9 +1457,9 @@
 function ArrayLastIndexOf(element, index) {
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
 
-  var length = TO_LENGTH_OR_UINT32(this.length);
+  var length = TO_LENGTH(this.length);
   return InnerArrayLastIndexOf(this, element, index, length,
-                        %_ArgumentsLength());
+                               arguments.length);
 }
 
 
@@ -1508,9 +1496,9 @@
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   return InnerArrayReduce(callback, current, array, length,
-                          %_ArgumentsLength());
+                          arguments.length);
 }
 
 
@@ -1548,9 +1536,9 @@
   // Pull out the length so that side effects are visible before the
   // callback function is checked.
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
   return InnerArrayReduceRight(callback, current, array, length,
-                               %_ArgumentsLength());
+                               arguments.length);
 }
 
 
@@ -1701,7 +1689,7 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
 
   var array = TO_OBJECT(this);
-  var length = TO_LENGTH_OR_UINT32(array.length);
+  var length = TO_LENGTH(array.length);
 
   return InnerArrayFill(value, start, end, array, length);
 }
@@ -1726,7 +1714,7 @@
 
   while (k < length) {
     var elementK = array[k];
-    if (SameValueZero(searchElement, elementK)) {
+    if (%SameValueZero(searchElement, elementK)) {
       return true;
     }
 
@@ -1778,23 +1766,10 @@
 
   if (!IS_UNDEFINED(iterable)) {
     result = %IsConstructor(this) ? new this() : [];
-
-    var iterator = GetIterator(items, iterable);
-
     k = 0;
-    while (true) {
-      var next = iterator.next();
 
-      if (!IS_RECEIVER(next)) {
-        throw MakeTypeError(kIteratorResultNotAnObject, next);
-      }
-
-      if (next.done) {
-        result.length = k;
-        return result;
-      }
-
-      nextValue = next.value;
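+    // Wrap the iterator obtained via GetIterator() in an ad-hoc iterable so
+    // that for-of can drive it while preserving the custom iteration hook.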
+    for (nextValue of
+         { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
       if (mapping) {
         mappedValue = %_Call(mapfn, receiver, nextValue, k);
       } else {
@@ -1803,6 +1778,8 @@
       AddArrayElement(this, result, k, mappedValue);
       k++;
     }
+    result.length = k;
+    return result;
   } else {
     var len = TO_LENGTH(items.length);
     result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
@@ -1824,13 +1801,13 @@
 
 
 // ES6, draft 05-22-14, section 22.1.2.3
-function ArrayOf() {
-  var length = %_ArgumentsLength();
+function ArrayOf(...args) {
+  var length = args.length;
   var constructor = this;
   // TODO: Implement IsConstructor (ES6 section 7.2.5)
   var array = %IsConstructor(constructor) ? new constructor(length) : [];
   for (var i = 0; i < length; i++) {
-    AddArrayElement(constructor, array, i, %_Arguments(i));
+    AddArrayElement(constructor, array, i, args[i]);
   }
   array.length = length;
   return array;
diff --git a/src/js/generator.js b/src/js/generator.js
index 7f43656..3dcdcc0 100644
--- a/src/js/generator.js
+++ b/src/js/generator.js
@@ -36,15 +36,31 @@
   if (continuation > 0) {
     // Generator is suspended.
     DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
-    try {
-      return %_GeneratorNext(this, value);
-    } catch (e) {
-      %GeneratorClose(this);
-      throw e;
-    }
+    return %_GeneratorNext(this, value);
   } else if (continuation == 0) {
     // Generator is already closed.
-    return { value: void 0, done: true };
+    return %_CreateIterResultObject(UNDEFINED, true);
+  } else {
+    // Generator is running.
+    throw MakeTypeError(kGeneratorRunning);
+  }
+}
+
+
+function GeneratorObjectReturn(value) {
+  if (!IS_GENERATOR(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        '[Generator].prototype.return', this);
+  }
+
+  var continuation = %GeneratorGetContinuation(this);
+  if (continuation > 0) {
+    // Generator is suspended.
+    DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
+    return %_GeneratorReturn(this, value);
+  } else if (continuation == 0) {
+    // Generator is already closed.
+    return %_CreateIterResultObject(value, true);
   } else {
     // Generator is running.
     throw MakeTypeError(kGeneratorRunning);
@@ -61,12 +77,8 @@
   var continuation = %GeneratorGetContinuation(this);
   if (continuation > 0) {
     // Generator is suspended.
-    try {
-      return %_GeneratorThrow(this, exn);
-    } catch (e) {
-      %GeneratorClose(this);
-      throw e;
-    }
+    DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
+    return %_GeneratorThrow(this, exn);
   } else if (continuation == 0) {
     // Generator is already closed.
     throw exn;
@@ -78,9 +90,11 @@
 
 // ----------------------------------------------------------------------------
 
-// Both Runtime_GeneratorNext and Runtime_GeneratorThrow are supported by
-// neither Crankshaft nor TurboFan, disable optimization of wrappers here.
+// None of the three resume operations (Runtime_GeneratorNext,
+// Runtime_GeneratorReturn, Runtime_GeneratorThrow) is supported by
+// Crankshaft or TurboFan.  Disable optimization of wrappers here.
 %NeverOptimizeFunction(GeneratorObjectNext);
+%NeverOptimizeFunction(GeneratorObjectReturn);
 %NeverOptimizeFunction(GeneratorObjectThrow);
 
 // Set up non-enumerable functions on the generator prototype object.
@@ -88,6 +102,7 @@
 utils.InstallFunctions(GeneratorObjectPrototype,
                        DONT_ENUM,
                       ["next", GeneratorObjectNext,
+                       "return", GeneratorObjectReturn,
                        "throw", GeneratorObjectThrow]);
 
 %AddNamedProperty(GeneratorObjectPrototype, "constructor",
diff --git a/src/js/harmony-reflect.js b/src/js/harmony-reflect.js
deleted file mode 100644
index dcadad5..0000000
--- a/src/js/harmony-reflect.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013-2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalReflect = global.Reflect;
-var MakeTypeError;
-var ReflectApply = utils.ImportNow("reflect_apply");
-var ReflectConstruct = utils.ImportNow("reflect_construct");
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-function ReflectEnumerate(obj) {
-  if (!IS_RECEIVER(obj))
-    throw MakeTypeError(kCalledOnNonObject, "Reflect.enumerate")
-  return (function* () { for (var x in obj) yield x })();
-}
-
-utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
-  "apply", ReflectApply,
-  "construct", ReflectConstruct,
-  "enumerate", ReflectEnumerate
-]);
-
-})
diff --git a/src/js/i18n.js b/src/js/i18n.js
index 7e00fcd..7b2f5a1 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -232,8 +232,8 @@
          // DateTimeFormat.format needs to be a 0-arg method, but it can still
          // receive an optional dateValue param. If one was provided, pass it
           // along.
-          if (%_ArgumentsLength() > 0) {
-            return implementation(that, %_Arguments(0));
+          if (arguments.length > 0) {
+            return implementation(that, arguments[0]);
           } else {
             return implementation(that);
           }
@@ -1002,8 +1002,8 @@
  * @constructor
  */
 %AddNamedProperty(Intl, 'Collator', function() {
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
 
     if (!this || this === Intl) {
       // Constructor is called as a function.
@@ -1060,7 +1060,7 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    return supportedLocalesOf('collator', locales, %_Arguments(1));
+    return supportedLocalesOf('collator', locales, arguments[1]);
   },
   DONT_ENUM
 );
@@ -1255,8 +1255,8 @@
  * @constructor
  */
 %AddNamedProperty(Intl, 'NumberFormat', function() {
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
 
     if (!this || this === Intl) {
       // Constructor is called as a function.
@@ -1332,7 +1332,7 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    return supportedLocalesOf('numberformat', locales, %_Arguments(1));
+    return supportedLocalesOf('numberformat', locales, arguments[1]);
   },
   DONT_ENUM
 );
@@ -1659,8 +1659,8 @@
  * @constructor
  */
 %AddNamedProperty(Intl, 'DateTimeFormat', function() {
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
 
     if (!this || this === Intl) {
       // Constructor is called as a function.
@@ -1755,7 +1755,7 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    return supportedLocalesOf('dateformat', locales, %_Arguments(1));
+    return supportedLocalesOf('dateformat', locales, arguments[1]);
   },
   DONT_ENUM
 );
@@ -1886,8 +1886,8 @@
  * @constructor
  */
 %AddNamedProperty(Intl, 'v8BreakIterator', function() {
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
 
     if (!this || this === Intl) {
       // Constructor is called as a function.
@@ -1943,7 +1943,7 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
+    return supportedLocalesOf('breakiterator', locales, arguments[1]);
   },
   DONT_ENUM
 );
@@ -2061,8 +2061,8 @@
       throw MakeTypeError(kMethodInvokedOnNullOrUndefined);
     }
 
-    var locales = %_Arguments(1);
-    var options = %_Arguments(2);
+    var locales = arguments[1];
+    var options = arguments[2];
     var collator = cachedOrNewService('collator', locales, options);
     return compare(collator, this, that);
   }
@@ -2085,7 +2085,7 @@
     CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
     var s = TO_STRING(this);
 
-    var formArg = %_Arguments(0);
+    var formArg = arguments[0];
     var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
 
     var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
@@ -2114,8 +2114,8 @@
       throw MakeTypeError(kMethodInvokedOnWrongType, "Number");
     }
 
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
     var numberFormat = cachedOrNewService('numberformat', locales, options);
     return formatNumber(numberFormat, this);
   }
@@ -2151,8 +2151,8 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
     return toLocaleDateTime(
         this, locales, options, 'any', 'all', 'dateformatall');
   }
@@ -2169,8 +2169,8 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
     return toLocaleDateTime(
         this, locales, options, 'date', 'date', 'dateformatdate');
   }
@@ -2187,8 +2187,8 @@
       throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    var locales = %_Arguments(0);
-    var options = %_Arguments(1);
+    var locales = arguments[0];
+    var options = arguments[1];
     return toLocaleDateTime(
         this, locales, options, 'time', 'time', 'dateformattime');
   }
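Note: throughout these i18n.js hunks the V8-private %_Arguments/%_ArgumentsLength intrinsics give way to the standard arguments object. A minimal standalone sketch of the optional-trailing-argument pattern (names are illustrative, not from the patch):

function toLocaleSomething(locales /* , options */) {
  // Reading options off the arguments object keeps the function's
  // declared length at 1 while still accepting a second argument.
  var options = arguments[1];
  if (options === undefined) options = {};
  return [locales, options];
}
toLocaleSomething("de");          // ["de", {}]
toLocaleSomething("de", {a: 1});  // ["de", {a: 1}]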
diff --git a/src/js/json.js b/src/js/json.js
index b8836ea..73d7802 100644
--- a/src/js/json.js
+++ b/src/js/json.js
@@ -187,7 +187,7 @@
 
 
 function JSONStringify(value, replacer, space) {
-  if (%_ArgumentsLength() == 1 && !IS_PROXY(value)) {
+  if (arguments.length === 1 && !IS_PROXY(value)) {
     return %BasicJSONStringify(value);
   }
   if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
@@ -234,6 +234,9 @@
   } else {
     gap = "";
   }
+  if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
+    return %BasicJSONStringify(value);
+  }
   return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
 }
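Note: the added check restores the %BasicJSONStringify fast path when the replacer and space arguments normalize to no-ops; illustrative calls:

JSON.stringify({a: 1});           // arguments.length === 1: fast path
JSON.stringify({a: 1}, null);     // replacer not callable, no gap: fast path
JSON.stringify({a: 1}, null, 2);  // gap "  ": generic JSONSerialize walk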
 
diff --git a/src/js/macros.py b/src/js/macros.py
index 3bcc8c1..b2a7856 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -67,9 +67,9 @@
 macro IS_BOOLEAN(arg)           = (typeof(arg) === 'boolean');
 macro IS_BOOLEAN_WRAPPER(arg)   = (%_ClassOf(arg) === 'Boolean');
 macro IS_DATAVIEW(arg)          = (%_ClassOf(arg) === 'DataView');
-macro IS_DATE(arg)              = (%_IsDate(arg));
+macro IS_DATE(arg)              = (%IsDate(arg));
 macro IS_ERROR(arg)             = (%_ClassOf(arg) === 'Error');
-macro IS_FUNCTION(arg)          = (%_IsFunction(arg));
+macro IS_FUNCTION(arg)          = (%IsFunction(arg));
 macro IS_GENERATOR(arg)         = (%_ClassOf(arg) === 'Generator');
 macro IS_GLOBAL(arg)            = (%_ClassOf(arg) === 'global');
 macro IS_MAP(arg)               = (%_ClassOf(arg) === 'Map');
@@ -85,7 +85,7 @@
 macro IS_SET(arg)               = (%_ClassOf(arg) === 'Set');
 macro IS_SET_ITERATOR(arg)      = (%_ClassOf(arg) === 'Set Iterator');
 macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_SIMD_VALUE(arg)        = (%_IsSimdValue(arg));
+macro IS_SIMD_VALUE(arg)        = (%IsSimdValue(arg));
 macro IS_STRING(arg)            = (typeof(arg) === 'string');
 macro IS_STRING_WRAPPER(arg)    = (%_ClassOf(arg) === 'String');
 macro IS_STRONG(arg)            = (%IsStrong(arg));
@@ -114,8 +114,6 @@
 macro TO_INT32(arg) = ((arg) | 0);
 macro TO_UINT32(arg) = ((arg) >>> 0);
 macro TO_LENGTH(arg) = (%_ToLength(arg));
-macro TO_LENGTH_OR_UINT32(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
-macro TO_LENGTH_OR_INTEGER(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_INTEGER(arg));
 macro TO_STRING(arg) = (%_ToString(arg));
 macro TO_NUMBER(arg) = (%_ToNumber(arg));
 macro TO_OBJECT(arg) = (%_ToObject(arg));
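Note: macros.py entries are plain textual macros expanded into the JS natives at build time, so switching %_IsDate to %IsDate swaps an inlined intrinsic for a runtime call at every expansion site. A sketch of one expansion (illustrative):

// Source in a native file:      if (!IS_DATE(this)) throw ...;
// Expanded before this change:  if (!(%_IsDate(this))) throw ...;
// Expanded after this change:   if (!(%IsDate(this))) throw ...;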
diff --git a/src/js/math.js b/src/js/math.js
index 990a7e9..a698fd4 100644
--- a/src/js/math.js
+++ b/src/js/math.js
@@ -75,60 +75,6 @@
   return %_MathLogRT(TO_NUMBER(x));
 }
 
-// ECMA 262 - 15.8.2.11
-function MathMax(arg1, arg2) {  // length == 2
-  var length = %_ArgumentsLength();
-  if (length == 2) {
-    arg1 = TO_NUMBER(arg1);
-    arg2 = TO_NUMBER(arg2);
-    if (arg2 > arg1) return arg2;
-    if (arg1 > arg2) return arg1;
-    if (arg1 == arg2) {
-      // Make sure -0 is considered less than +0.
-      return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
-    }
-    // All comparisons failed, one of the arguments must be NaN.
-    return NaN;
-  }
-  var r = -INFINITY;
-  for (var i = 0; i < length; i++) {
-    var n = %_Arguments(i);
-    n = TO_NUMBER(n);
-    // Make sure +0 is considered greater than -0.
-    if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
-      r = n;
-    }
-  }
-  return r;
-}
-
-// ECMA 262 - 15.8.2.12
-function MathMin(arg1, arg2) {  // length == 2
-  var length = %_ArgumentsLength();
-  if (length == 2) {
-    arg1 = TO_NUMBER(arg1);
-    arg2 = TO_NUMBER(arg2);
-    if (arg2 > arg1) return arg1;
-    if (arg1 > arg2) return arg2;
-    if (arg1 == arg2) {
-      // Make sure -0 is considered less than +0.
-      return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
-    }
-    // All comparisons failed, one of the arguments must be NaN.
-    return NaN;
-  }
-  var r = INFINITY;
-  for (var i = 0; i < length; i++) {
-    var n = %_Arguments(i);
-    n = TO_NUMBER(n);
-    // Make sure -0 is considered less than +0.
-    if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
-      r = n;
-    }
-  }
-  return r;
-}
-
 // ECMA 262 - 15.8.2.13
 function MathPowJS(x, y) {
   return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
@@ -218,17 +164,14 @@
   // We may want to introduce fast paths for two arguments and when
   // normalization to avoid overflow is not necessary.  For now, we
   // simply assume the general case.
-  var length = %_ArgumentsLength();
-  var args = new InternalArray(length);
+  var length = arguments.length;
   var max = 0;
   for (var i = 0; i < length; i++) {
-    var n = %_Arguments(i);
-    n = TO_NUMBER(n);
-    if (n === INFINITY || n === -INFINITY) return INFINITY;
-    n = MathAbs(n);
+    var n = MathAbs(arguments[i]);
     if (n > max) max = n;
-    args[i] = n;
+    arguments[i] = n;
   }
+  if (max === INFINITY) return INFINITY;
 
   // Kahan summation to avoid rounding errors.
   // Normalize the numbers to the largest one to avoid overflow.
@@ -236,7 +179,7 @@
   var sum = 0;
   var compensation = 0;
   for (var i = 0; i < length; i++) {
-    var n = args[i] / max;
+    var n = arguments[i] / max;
     var summand = n * n - compensation;
     var preliminary = sum + summand;
     compensation = (preliminary - sum) - summand;
@@ -314,8 +257,6 @@
   "sqrt", MathSqrtJS,
   "atan2", MathAtan2JS,
   "pow", MathPowJS,
-  "max", MathMax,
-  "min", MathMin,
   "imul", MathImul,
   "sign", MathSign,
   "trunc", MathTrunc,
@@ -349,8 +290,6 @@
   to.MathExp = MathExp;
   to.MathFloor = MathFloorJS;
   to.IntRandom = MathRandomRaw;
-  to.MathMax = MathMax;
-  to.MathMin = MathMin;
 });
 
 })
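Note: a self-contained sketch of the MathHypot strategy kept above: take absolute values while tracking the max, bail out on infinities, then normalize by the max and Kahan-sum the squares (plain JS, no intrinsics; illustrative only):

function hypotSketch() {
  var max = 0;
  for (var i = 0; i < arguments.length; i++) {
    arguments[i] = Math.abs(arguments[i]);
    if (arguments[i] > max) max = arguments[i];
  }
  if (max === Infinity) return Infinity;
  if (max === 0) max = 1;                  // avoid 0/0 below
  var sum = 0, compensation = 0;
  for (var i = 0; i < arguments.length; i++) {
    var n = arguments[i] / max;            // normalize to avoid overflow
    var summand = n * n - compensation;    // Kahan summation step
    var preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;
    sum = preliminary;
  }
  return Math.sqrt(sum) * max;
}
hypotSketch(3, 4);  // 5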
diff --git a/src/js/prologue.js b/src/js/prologue.js
index 2779393..24225a0 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -179,8 +179,6 @@
     "MapEntries",
     "MapIterator",
     "MapIteratorNext",
-    "MathMax",
-    "MathMin",
     "MaxSimple",
     "MinSimple",
     "ObjectDefineProperty",
@@ -189,7 +187,6 @@
     "PromiseChain",
     "PromiseDeferred",
     "PromiseResolved",
-    "SameValueZero",
     "SetIterator",
     "SetIteratorNext",
     "SetValues",
diff --git a/src/js/proxy.js b/src/js/proxy.js
index 842bac0..a111c09 100644
--- a/src/js/proxy.js
+++ b/src/js/proxy.js
@@ -12,11 +12,6 @@
 // Imports
 //
 var GlobalProxy = global.Proxy;
-var MakeTypeError;
-
-utils.Import(function(from) {
-  MakeTypeError = from.MakeTypeError;
-});
 
 //----------------------------------------------------------------------------
 
@@ -25,33 +20,6 @@
   return {proxy: p, revoke: () => %JSProxyRevoke(p)};
 }
 
-// -------------------------------------------------------------------
-// Proxy Builtins
-
-// Implements part of ES6 9.5.11 Proxy.[[Enumerate]]:
-// Call the trap, which should return an iterator, exhaust the iterator,
-// and return an array containing the values.
-function ProxyEnumerate(trap, handler, target) {
-  // 7. Let trapResult be ? Call(trap, handler, «target»).
-  var trap_result = %_Call(trap, handler, target);
-  // 8. If Type(trapResult) is not Object, throw a TypeError exception.
-  if (!IS_RECEIVER(trap_result)) {
-    throw MakeTypeError(kProxyEnumerateNonObject);
-  }
-  // 9. Return trapResult.
-  var result = [];
-  for (var it = trap_result.next(); !it.done; it = trap_result.next()) {
-    var key = it.value;
-    // Not yet spec'ed as of 2015-11-25, but will be spec'ed soon:
-    // If the iterator returns a non-string value, throw a TypeError.
-    if (!IS_STRING(key)) {
-      throw MakeTypeError(kProxyEnumerateNonString);
-    }
-    result.push(key);
-  }
-  return result;
-}
-
 //-------------------------------------------------------------------
 
 // Set up non-enumerable properties of the Proxy object.
@@ -59,11 +27,4 @@
   "revocable", ProxyCreateRevocable
 ]);
 
-// -------------------------------------------------------------------
-// Exports
-
-%InstallToContext([
-  "proxy_enumerate", ProxyEnumerate,
-]);
-
 })
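Note: with the dead @@enumerate plumbing gone, proxy.js only installs Proxy.revocable; its contract, for reference:

var r = Proxy.revocable({}, {});
r.proxy.x = 42;   // forwards to the target while the proxy is live
r.revoke();       // detaches the proxy from its target and handler
// Any further operation on r.proxy now throws a TypeError.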
diff --git a/src/js/regexp.js b/src/js/regexp.js
index eeacd6e..e80d019 100644
--- a/src/js/regexp.js
+++ b/src/js/regexp.js
@@ -9,7 +9,7 @@
 // -------------------------------------------------------------------
 // Imports
 
-var FLAG_harmony_tolength;
+var ExpandReplacement;
 var GlobalObject = global.Object;
 var GlobalRegExp = global.RegExp;
 var GlobalRegExpPrototype;
@@ -17,14 +17,12 @@
 var InternalPackedArray = utils.InternalPackedArray;
 var MakeTypeError;
 var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
 var splitSymbol = utils.ImportNow("split_symbol");
 
-utils.ImportFromExperimental(function(from) {
-  FLAG_harmony_tolength = from.FLAG_harmony_tolength;
-});
-
 utils.Import(function(from) {
+  ExpandReplacement = from.ExpandReplacement;
   MakeTypeError = from.MakeTypeError;
 });
 
@@ -176,7 +174,7 @@
 
   // Conversion is required by the ES2015 specification (RegExpBuiltinExec
   // algorithm, step 4) even if the value is discarded for non-global RegExps.
-  var i = TO_LENGTH_OR_INTEGER(lastIndex);
+  var i = TO_LENGTH(lastIndex);
 
   var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
   if (updateLastIndex) {
@@ -223,7 +221,7 @@
 
   // Conversion is required by the ES2015 specification (RegExpBuiltinExec
   // algorithm, step 4) even if the value is discarded for non-global RegExps.
-  var i = TO_LENGTH_OR_INTEGER(lastIndex);
+  var i = TO_LENGTH(lastIndex);
 
   if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
     if (i < 0 || i > string.length) {
@@ -262,7 +260,7 @@
 }
 
 function TrimRegExp(regexp) {
-  if (!%_ObjectEquals(regexp_key, regexp)) {
+  if (regexp_key !== regexp) {
     regexp_key = regexp;
     regexp_val =
       new GlobalRegExp(
@@ -283,8 +281,11 @@
       %IncrementUseCounter(kRegExpPrototypeToString);
       return '/(?:)/';
     }
-    throw MakeTypeError(kIncompatibleMethodReceiver,
-                        'RegExp.prototype.toString', this);
+    if (!IS_RECEIVER(this)) {
+      throw MakeTypeError(
+          kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
+    }
+    return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
   }
   var result = '/' + REGEXP_SOURCE(this) + '/';
   if (REGEXP_GLOBAL(this)) result += 'g';
@@ -296,6 +297,15 @@
 }
 
 
+function AtSurrogatePair(subject, index) {
+  if (index + 1 >= subject.length) return false;
+  var first = %_StringCharCodeAt(subject, index);
+  if (first < 0xD800 || first > 0xDBFF) return false;
+  var second = %_StringCharCodeAt(subject, index + 1);
+  return second >= 0xDC00 && second <= 0xDFFF;
+}
+
+
 // ES6 21.2.5.11.
 function RegExpSplit(string, limit) {
   // TODO(yangguo): allow non-regexp receivers.
@@ -337,7 +347,11 @@
 
     // We ignore a zero-length match at the currentIndex.
     if (startIndex === endIndex && endIndex === currentIndex) {
-      startIndex++;
+      if (REGEXP_UNICODE(this) && AtSurrogatePair(subject, startIndex)) {
+        startIndex += 2;
+      } else {
+        startIndex++;
+      }
       continue;
     }
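Note: the surrogate-pair check above keeps a unicode-mode split from advancing into the middle of an astral character; observable behavior (illustrative):

"a\u{1F600}b".split(/(?:)/u);  // ["a", "\u{1F600}", "b"]: the pair stays intact
"a\u{1F600}b".split(/(?:)/);   // four entries: the lone surrogates are split apart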
 
@@ -382,6 +396,175 @@
 }
 
 
+// ES6 21.2.5.8.
+
+// TODO(lrn): This array will survive indefinitely if replace is never
+// called again. However, it will be empty, since the contents are cleared
+// before it is written back after each use.
+var reusableReplaceArray = new InternalArray(4);
+
+// Helper function for replacing regular expressions with the result of a
+// function application; reached from String.prototype.replace via
+// RegExp.prototype[Symbol.replace].
+function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
+  var resultArray = reusableReplaceArray;
+  if (resultArray) {
+    reusableReplaceArray = null;
+  } else {
+    // Inside a nested replace (replace called from the replacement function
+    // of another replace) or we have failed to set the reusable array
+    // back due to an exception in a replacement function. Create a new
+    // array to use in the future, or until the original is written back.
+    resultArray = new InternalArray(16);
+  }
+  var res = %RegExpExecMultiple(regexp,
+                                subject,
+                                RegExpLastMatchInfo,
+                                resultArray);
+  regexp.lastIndex = 0;
+  if (IS_NULL(res)) {
+    // No matches at all.
+    reusableReplaceArray = resultArray;
+    return subject;
+  }
+  var len = res.length;
+  if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
+    // If the number of captures is two then there are no explicit captures in
+    // the regexp, just the implicit capture that captures the whole match.  In
+    // this case we can simplify quite a bit and end up with something faster.
+    // The builder will consist of some integers that indicate slices of the
+    // input string and some replacements that were returned from the replace
+    // function.
+    var match_start = 0;
+    for (var i = 0; i < len; i++) {
+      var elem = res[i];
+      if (%_IsSmi(elem)) {
+        // Integers represent slices of the original string.
+        if (elem > 0) {
+          match_start = (elem >> 11) + (elem & 0x7ff);
+        } else {
+          match_start = res[++i] - elem;
+        }
+      } else {
+        var func_result = replace(elem, match_start, subject);
+        // Overwrite the i'th element in the results with the string we got
+        // back from the callback function.
+        res[i] = TO_STRING(func_result);
+        match_start += elem.length;
+      }
+    }
+  } else {
+    for (var i = 0; i < len; i++) {
+      var elem = res[i];
+      if (!%_IsSmi(elem)) {
+        // elem must be an Array.
+        // Use the apply argument as backing for global RegExp properties.
+        var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
+        // Overwrite the i'th element in the results with the string we got
+        // back from the callback function.
+        res[i] = TO_STRING(func_result);
+      }
+    }
+  }
+  var result = %StringBuilderConcat(res, len, subject);
+  resultArray.length = 0;
+  reusableReplaceArray = resultArray;
+  return result;
+}
+
+
+// Compute the string of a given regular expression capture.
+function CaptureString(string, lastCaptureInfo, index) {
+  // Scale the index.
+  var scaled = index << 1;
+  // Compute start and end.
+  var start = lastCaptureInfo[CAPTURE(scaled)];
+  // If start isn't valid, return undefined.
+  if (start < 0) return;
+  var end = lastCaptureInfo[CAPTURE(scaled + 1)];
+  return %_SubString(string, start, end);
+}
+
+
+function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
+  var matchInfo = DoRegExpExec(regexp, subject, 0);
+  if (IS_NULL(matchInfo)) {
+    regexp.lastIndex = 0;
+    return subject;
+  }
+  var index = matchInfo[CAPTURE0];
+  var result = %_SubString(subject, 0, index);
+  var endOfMatch = matchInfo[CAPTURE1];
+  // Compute the parameter list consisting of the match, captures, index,
+  // and subject for the replace function invocation.
+  // The number of captures plus one for the match.
+  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
+  var replacement;
+  if (m == 1) {
+    // No captures, only the match, which is always valid.
+    var s = %_SubString(subject, index, endOfMatch);
+    // Don't call directly to avoid exposing the built-in global object.
+    replacement = replace(s, index, subject);
+  } else {
+    var parameters = new InternalArray(m + 2);
+    for (var j = 0; j < m; j++) {
+      parameters[j] = CaptureString(subject, matchInfo, j);
+    }
+    parameters[j] = index;
+    parameters[j + 1] = subject;
+
+    replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
+  }
+
+  result += replacement;  // The add method converts to string if necessary.
+  // Can't use matchInfo any more from here, since the function could
+  // overwrite it.
+  return result + %_SubString(subject, endOfMatch, subject.length);
+}
+
+
+function RegExpReplace(string, replace) {
+  // TODO(littledan): allow non-regexp receivers.
+  if (!IS_REGEXP(this)) {
+    throw MakeTypeError(kIncompatibleMethodReceiver,
+                        "RegExp.prototype.@@replace", this);
+  }
+  var subject = TO_STRING(string);
+  var search = this;
+
+  if (!IS_CALLABLE(replace)) {
+    replace = TO_STRING(replace);
+
+    if (!REGEXP_GLOBAL(search)) {
+      // Non-global regexp search, string replace.
+      var match = DoRegExpExec(search, subject, 0);
+      if (match == null) {
+        search.lastIndex = 0;
+        return subject;
+      }
+      if (replace.length == 0) {
+        return %_SubString(subject, 0, match[CAPTURE0]) +
+               %_SubString(subject, match[CAPTURE1], subject.length);
+      }
+      return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
+                                 %_SubString(subject, 0, match[CAPTURE0])) +
+             %_SubString(subject, match[CAPTURE1], subject.length);
+    }
+
+    // Global regexp search, string replace.
+    search.lastIndex = 0;
+    return %StringReplaceGlobalRegExpWithString(
+        subject, search, replace, RegExpLastMatchInfo);
+  }
+
+  if (REGEXP_GLOBAL(search)) {
+    // Global regexp search, function replace.
+    return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+  }
+  // Non-global regexp search, function replace.
+  return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
+}
+
+
 // ES6 21.2.5.9.
 function RegExpSearch(string) {
   // TODO(yangguo): allow non-regexp receivers.
@@ -530,6 +713,7 @@
   "toString", RegExpToString,
   "compile", RegExpCompileJS,
   matchSymbol, RegExpMatch,
+  replaceSymbol, RegExpReplace,
   searchSymbol, RegExpSearch,
   splitSymbol, RegExpSplit,
 ]);
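Note: with @@replace installed here, String.prototype.replace (rewritten in string.js below) dispatches through the regexp's own method; both forms take the same path (illustrative):

"2016-03-14".replace(/-/g, ".");          // "2016.03.14"
/-/g[Symbol.replace]("2016-03-14", ".");  // "2016.03.14", called directly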
@@ -539,9 +723,6 @@
 utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
 utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
 
-// The length of compile is 1 in SpiderMonkey.
-%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
-
 // The properties `input` and `$_` are aliases for each other.  When this
 // value is set the value it is set to is coerced to a string.
 // Getter and setter for the input.
diff --git a/src/js/runtime.js b/src/js/runtime.js
index 301d75a..7a61094 100644
--- a/src/js/runtime.js
+++ b/src/js/runtime.js
@@ -36,47 +36,12 @@
 
 // ----------------------------------------------------------------------------
 
-/* -----------------------------
-   - - -   H e l p e r s   - - -
-   -----------------------------
+
+/* ---------------------------------
+   - - -   U t i l i t i e s   - - -
+   ---------------------------------
 */
 
-function CONCAT_ITERABLE_TO_ARRAY(iterable) {
-  return %concat_iterable_to_array(this, iterable);
-};
-
-
-/* -------------------------------------
-   - - -   C o n v e r s i o n s   - - -
-   -------------------------------------
-*/
-
-// ES5, section 9.12
-function SameValue(x, y) {
-  if (typeof x != typeof y) return false;
-  if (IS_NUMBER(x)) {
-    if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
-    // x is +0 and y is -0 or vice versa.
-    if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
-      return false;
-    }
-  }
-  if (IS_SIMD_VALUE(x)) return %SimdSameValue(x, y);
-  return x === y;
-}
-
-
-// ES6, section 7.2.4
-function SameValueZero(x, y) {
-  if (typeof x != typeof y) return false;
-  if (IS_NUMBER(x)) {
-    if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
-  }
-  if (IS_SIMD_VALUE(x)) return %SimdSameValueZero(x, y);
-  return x === y;
-}
-
-
 function ConcatIterableToArray(target, iterable) {
    var index = target.length;
    for (var element of iterable) {
@@ -86,12 +51,6 @@
 }
 
 
-/* ---------------------------------
-   - - -   U t i l i t i e s   - - -
-   ---------------------------------
-*/
-
-
 // This function should be called rather than %AddElement in contexts where the
 // argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
 // this is a concern at basically all callsites.
@@ -174,17 +133,11 @@
   to.AddIndexedProperty = AddIndexedProperty;
   to.MaxSimple = MaxSimple;
   to.MinSimple = MinSimple;
-  to.SameValue = SameValue;
-  to.SameValueZero = SameValueZero;
   to.ToPositiveInteger = ToPositiveInteger;
   to.SpeciesConstructor = SpeciesConstructor;
 });
 
 %InstallToContext([
-  "concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
-]);
-
-%InstallToContext([
   "concat_iterable_to_array", ConcatIterableToArray,
 ]);
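Note: the JS-level SameValue/SameValueZero helpers above are gone; callers such as v8natives.js now use the %SameValue runtime call, which keeps Object.is semantics:

Object.is(NaN, NaN);  // true,  while NaN === NaN is false
Object.is(0, -0);     // false, while 0 === -0 is true
Object.is("a", "a");  // true, same as === for ordinary values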
 
diff --git a/src/js/spread.js b/src/js/spread.js
index 235c91a..82ea839 100644
--- a/src/js/spread.js
+++ b/src/js/spread.js
@@ -18,11 +18,11 @@
 // -------------------------------------------------------------------
 
 function SpreadArguments() {
-  var count = %_ArgumentsLength();
+  var count = arguments.length;
   var args = new InternalArray();
 
   for (var i = 0; i < count; ++i) {
-    var array = %_Arguments(i);
+    var array = arguments[i];
     var length = array.length;
     for (var j = 0; j < length; ++j) {
       args.push(array[j]);
diff --git a/src/js/string.js b/src/js/string.js
index b220038..a401978 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -17,12 +17,11 @@
 var InternalPackedArray = utils.InternalPackedArray;
 var MakeRangeError;
 var MakeTypeError;
-var MathMax;
-var MathMin;
+var MaxSimple;
+var MinSimple;
 var matchSymbol = utils.ImportNow("match_symbol");
-var RegExpExec;
 var RegExpExecNoTests;
-var RegExpLastMatchInfo;
+var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
 var splitSymbol = utils.ImportNow("split_symbol");
 
@@ -31,11 +30,9 @@
   ArrayJoin = from.ArrayJoin;
   MakeRangeError = from.MakeRangeError;
   MakeTypeError = from.MakeTypeError;
-  MathMax = from.MathMax;
-  MathMin = from.MathMin;
-  RegExpExec = from.RegExpExec;
+  MaxSimple = from.MaxSimple;
+  MinSimple = from.MinSimple;
   RegExpExecNoTests = from.RegExpExecNoTests;
-  RegExpLastMatchInfo = from.RegExpLastMatchInfo;
 });
 
 //-------------------------------------------------------------------
@@ -84,41 +81,34 @@
 
 // ECMA-262, section 15.5.4.6
 function StringConcat(other /* and more */) {  // length == 1
+  "use strict";
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
-  var len = %_ArgumentsLength();
-  var this_as_string = TO_STRING(this);
-  if (len === 1) {
-    return this_as_string + TO_STRING(other);
+  var s = TO_STRING(this);
+  var len = arguments.length;
+  for (var i = 0; i < len; ++i) {
+    s = s + TO_STRING(arguments[i]);
   }
-  var parts = new InternalArray(len + 1);
-  parts[0] = this_as_string;
-  for (var i = 0; i < len; i++) {
-    var part = %_Arguments(i);
-    parts[i + 1] = TO_STRING(part);
-  }
-  return %StringBuilderConcat(parts, len + 1, "");
+  return s;
 }
 
 
 // ECMA-262 section 15.5.4.7
-function StringIndexOfJS(pattern /* position */) {  // length == 1
+function StringIndexOf(pattern, position) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
 
   var subject = TO_STRING(this);
   pattern = TO_STRING(pattern);
-  var index = 0;
-  if (%_ArgumentsLength() > 1) {
-    index = %_Arguments(1);  // position
-    index = TO_INTEGER(index);
-    if (index < 0) index = 0;
-    if (index > subject.length) index = subject.length;
-  }
+  var index = TO_INTEGER(position);
+  if (index < 0) index = 0;
+  if (index > subject.length) index = subject.length;
   return %StringIndexOf(subject, pattern, index);
 }
 
+%FunctionSetLength(StringIndexOf, 1);
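Note: declaring position as a formal parameter would bump the function's length to 2, so %FunctionSetLength pins it back to the spec-mandated value; the same pattern recurs for lastIndexOf, normalize, startsWith, endsWith, and includes below:

"abc".indexOf.length;  // 1 per spec, despite the (pattern, position) signature
"abc".indexOf("b", 0); // 1: both parameters remain usable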
+
 
 // ECMA-262 section 15.5.4.8
-function StringLastIndexOfJS(pat /* position */) {  // length == 1
+function StringLastIndexOf(pat, pos) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
 
   var sub = TO_STRING(this);
@@ -126,16 +116,14 @@
   var pat = TO_STRING(pat);
   var patLength = pat.length;
   var index = subLength - patLength;
-  if (%_ArgumentsLength() > 1) {
-    var position = TO_NUMBER(%_Arguments(1));
-    if (!NUMBER_IS_NAN(position)) {
-      position = TO_INTEGER(position);
-      if (position < 0) {
-        position = 0;
-      }
-      if (position + patLength < subLength) {
-        index = position;
-      }
+  var position = TO_NUMBER(pos);
+  if (!NUMBER_IS_NAN(position)) {
+    position = TO_INTEGER(position);
+    if (position < 0) {
+      position = 0;
+    }
+    if (position + patLength < subLength) {
+      index = position;
     }
   }
   if (index < 0) {
@@ -144,6 +132,8 @@
   return %StringLastIndexOf(sub, pat, index);
 }
 
+%FunctionSetLength(StringLastIndexOf, 1);
+
 
 // ECMA-262 section 15.5.4.9
 //
@@ -180,11 +170,10 @@
 // For now we do nothing, as proper normalization requires big tables.
 // If Intl is enabled, then i18n.js will override it and provide the
 // proper functionality.
-function StringNormalizeJS() {
+function StringNormalize(formArg) {  // length == 0
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
   var s = TO_STRING(this);
 
-  var formArg = %_Arguments(0);
   var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
 
   var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
@@ -197,6 +186,8 @@
   return s;
 }
 
+%FunctionSetLength(StringNormalize, 0);
+
 
 // This has the same size as the RegExpLastMatchInfo array, and can be used
 // for functions that expect that structure to be returned.  It is used when
@@ -206,14 +197,12 @@
 var reusableMatchInfo = [2, "", "", -1, -1];
 
 
-// ECMA-262, section 15.5.4.11
+// ES6, section 21.1.3.14
 function StringReplace(search, replace) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
 
-  var subject = TO_STRING(this);
-
   // Decision tree for dispatch
-  // .. regexp search
+  // .. regexp search (in src/js/regexp.js, RegExpReplace)
   // .... string replace
   // ...... non-global search
   // ........ empty string replace
@@ -229,40 +218,15 @@
   // ...... function replace
   // ...... string replace (with $-expansion)
 
-  if (IS_REGEXP(search)) {
-    if (!IS_CALLABLE(replace)) {
-      replace = TO_STRING(replace);
-
-      if (!REGEXP_GLOBAL(search)) {
-        // Non-global regexp search, string replace.
-        var match = RegExpExec(search, subject, 0);
-        if (match == null) {
-          search.lastIndex = 0
-          return subject;
-        }
-        if (replace.length == 0) {
-          return %_SubString(subject, 0, match[CAPTURE0]) +
-                 %_SubString(subject, match[CAPTURE1], subject.length)
-        }
-        return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
-                                 %_SubString(subject, 0, match[CAPTURE0])) +
-               %_SubString(subject, match[CAPTURE1], subject.length);
-      }
-
-      // Global regexp search, string replace.
-      search.lastIndex = 0;
-      return %StringReplaceGlobalRegExpWithString(
-          subject, search, replace, RegExpLastMatchInfo);
+  if (!IS_NULL_OR_UNDEFINED(search)) {
+    var replacer = search[replaceSymbol];
+    if (!IS_UNDEFINED(replacer)) {
+      return %_Call(replacer, search, this, replace);
     }
-
-    if (REGEXP_GLOBAL(search)) {
-      // Global regexp search, function replace.
-      return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
-    }
-    // Non-global regexp search, function replace.
-    return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
   }
 
+  var subject = TO_STRING(this);
+
   search = TO_STRING(search);
 
   if (search.length == 1 &&
@@ -379,130 +343,6 @@
 }
 
 
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
-  // Scale the index.
-  var scaled = index << 1;
-  // Compute start and end.
-  var start = lastCaptureInfo[CAPTURE(scaled)];
-  // If start isn't valid, return undefined.
-  if (start < 0) return;
-  var end = lastCaptureInfo[CAPTURE(scaled + 1)];
-  return %_SubString(string, start, end);
-}
-
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(4);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
-  var resultArray = reusableReplaceArray;
-  if (resultArray) {
-    reusableReplaceArray = null;
-  } else {
-    // Inside a nested replace (replace called from the replacement function
-    // of another replace) or we have failed to set the reusable array
-    // back due to an exception in a replacement function. Create a new
-    // array to use in the future, or until the original is written back.
-    resultArray = new InternalArray(16);
-  }
-  var res = %RegExpExecMultiple(regexp,
-                                subject,
-                                RegExpLastMatchInfo,
-                                resultArray);
-  regexp.lastIndex = 0;
-  if (IS_NULL(res)) {
-    // No matches at all.
-    reusableReplaceArray = resultArray;
-    return subject;
-  }
-  var len = res.length;
-  if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
-    // If the number of captures is two then there are no explicit captures in
-    // the regexp, just the implicit capture that captures the whole match.  In
-    // this case we can simplify quite a bit and end up with something faster.
-    // The builder will consist of some integers that indicate slices of the
-    // input string and some replacements that were returned from the replace
-    // function.
-    var match_start = 0;
-    for (var i = 0; i < len; i++) {
-      var elem = res[i];
-      if (%_IsSmi(elem)) {
-        // Integers represent slices of the original string.
-        if (elem > 0) {
-          match_start = (elem >> 11) + (elem & 0x7ff);
-        } else {
-          match_start = res[++i] - elem;
-        }
-      } else {
-        var func_result = replace(elem, match_start, subject);
-        // Overwrite the i'th element in the results with the string we got
-        // back from the callback function.
-        res[i] = TO_STRING(func_result);
-        match_start += elem.length;
-      }
-    }
-  } else {
-    for (var i = 0; i < len; i++) {
-      var elem = res[i];
-      if (!%_IsSmi(elem)) {
-        // elem must be an Array.
-        // Use the apply argument as backing for global RegExp properties.
-        var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
-        // Overwrite the i'th element in the results with the string we got
-        // back from the callback function.
-        res[i] = TO_STRING(func_result);
-      }
-    }
-  }
-  var result = %StringBuilderConcat(res, len, subject);
-  resultArray.length = 0;
-  reusableReplaceArray = resultArray;
-  return result;
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
-  var matchInfo = RegExpExec(regexp, subject, 0);
-  if (IS_NULL(matchInfo)) {
-    regexp.lastIndex = 0;
-    return subject;
-  }
-  var index = matchInfo[CAPTURE0];
-  var result = %_SubString(subject, 0, index);
-  var endOfMatch = matchInfo[CAPTURE1];
-  // Compute the parameter list consisting of the match, captures, index,
-  // and subject for the replace function invocation.
-  // The number of captures plus one for the match.
-  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
-  var replacement;
-  if (m == 1) {
-    // No captures, only the match, which is always valid.
-    var s = %_SubString(subject, index, endOfMatch);
-    // Don't call directly to avoid exposing the built-in global object.
-    replacement = replace(s, index, subject);
-  } else {
-    var parameters = new InternalArray(m + 2);
-    for (var j = 0; j < m; j++) {
-      parameters[j] = CaptureString(subject, matchInfo, j);
-    }
-    parameters[j] = index;
-    parameters[j + 1] = subject;
-
-    replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
-  }
-
-  result += replacement;  // The add method converts to string if necessary.
-  // Can't use matchInfo any more from here, since the function could
-  // overwrite it.
-  return result + %_SubString(subject, endOfMatch, subject.length);
-}
-
-
 // ES6 21.1.3.15.
 function StringSearch(pattern) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -719,28 +559,14 @@
 
 
 // ECMA-262, section 15.5.3.2
-function StringFromCharCode(code) {
-  var n = %_ArgumentsLength();
-  if (n == 1) return %_StringCharFromCode(code & 0xffff);
-
-  var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
-  var i;
-  for (i = 0; i < n; i++) {
-    code = %_Arguments(i) & 0xffff;
-    if (code > 0xff) break;
-    %_OneByteSeqStringSetChar(i, code, one_byte);
+function StringFromCharCode(_) {  // length == 1
+  "use strict";
+  var s = "";
+  var n = arguments.length;
+  for (var i = 0; i < n; ++i) {
+    s += %_StringCharFromCode(arguments[i] & 0xffff);
   }
-  if (i == n) return one_byte;
-  one_byte = %TruncateString(one_byte, i);
-
-  var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
-  %_TwoByteSeqStringSetChar(0, code, two_byte);
-  i++;
-  for (var j = 1; i < n; i++, j++) {
-    code = %_Arguments(i) & 0xffff;
-    %_TwoByteSeqStringSetChar(j, code, two_byte);
-  }
-  return one_byte + two_byte;
+  return s;
 }
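Note: the simplified loop still applies the spec's ToUint16 via the & 0xffff mask before converting each code unit:

String.fromCharCode(97, 98);   // "ab"
String.fromCharCode(0x10041);  // "A": 0x10041 & 0xffff === 0x41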
 
 
@@ -870,7 +696,7 @@
 
 
 // ES6 draft 04-05-14, section 21.1.3.18
-function StringStartsWith(searchString /* position */) {  // length == 1
+function StringStartsWith(searchString, position) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
 
   var s = TO_STRING(this);
@@ -880,16 +706,10 @@
   }
 
   var ss = TO_STRING(searchString);
-  var pos = 0;
-  if (%_ArgumentsLength() > 1) {
-    var arg = %_Arguments(1);  // position
-    if (!IS_UNDEFINED(arg)) {
-      pos = TO_INTEGER(arg);
-    }
-  }
+  var pos = TO_INTEGER(position);
 
   var s_len = s.length;
-  var start = MathMin(MathMax(pos, 0), s_len);
+  var start = MinSimple(MaxSimple(pos, 0), s_len);
   var ss_len = ss.length;
   if (ss_len + start > s_len) {
     return false;
@@ -898,9 +718,11 @@
   return %_SubString(s, start, start + ss_len) === ss;
 }
 
+%FunctionSetLength(StringStartsWith, 1);
+
 
 // ES6 draft 04-05-14, section 21.1.3.7
-function StringEndsWith(searchString /* position */) {  // length == 1
+function StringEndsWith(searchString, position) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
 
   var s = TO_STRING(this);
@@ -911,15 +733,9 @@
 
   var ss = TO_STRING(searchString);
   var s_len = s.length;
-  var pos = s_len;
-  if (%_ArgumentsLength() > 1) {
-    var arg = %_Arguments(1);  // position
-    if (!IS_UNDEFINED(arg)) {
-      pos = TO_INTEGER(arg);
-    }
-  }
+  var pos = !IS_UNDEFINED(position) ? TO_INTEGER(position) : s_len;
 
-  var end = MathMin(MathMax(pos, 0), s_len);
+  var end = MinSimple(MaxSimple(pos, 0), s_len);
   var ss_len = ss.length;
   var start = end - ss_len;
   if (start < 0) {
@@ -929,9 +745,11 @@
   return %_SubString(s, start, start + ss_len) === ss;
 }
 
+%FunctionSetLength(StringEndsWith, 1);
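Note: MaxSimple/MinSimple, imported above in place of the removed MathMax/MathMin, clamp the position into [0, length]:

"abcdef".startsWith("ab", -5);  // true: start clamps to 0
"abcdef".endsWith("ef", 99);    // true: end clamps to 6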
+
 
 // ES6 draft 04-05-14, section 21.1.3.6
-function StringIncludes(searchString /* position */) {  // length == 1
+function StringIncludes(searchString, position) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
 
   var string = TO_STRING(this);
@@ -941,11 +759,7 @@
   }
 
   searchString = TO_STRING(searchString);
-  var pos = 0;
-  if (%_ArgumentsLength() > 1) {
-    pos = %_Arguments(1);  // position
-    pos = TO_INTEGER(pos);
-  }
+  var pos = TO_INTEGER(position);
 
   var stringLength = string.length;
   if (pos < 0) pos = 0;
@@ -959,6 +773,8 @@
   return %StringIndexOf(string, searchString, pos) !== -1;
 }
 
+%FunctionSetLength(StringIncludes, 1);
+
 
 // ES6 Draft 05-22-2014, section 21.1.3.3
 function StringCodePointAt(pos) {
@@ -984,12 +800,13 @@
 
 // ES6 Draft 05-22-2014, section 21.1.2.2
 function StringFromCodePoint(_) {  // length = 1
+  "use strict";
   var code;
-  var length = %_ArgumentsLength();
+  var length = arguments.length;
   var index;
   var result = "";
   for (index = 0; index < length; index++) {
-    code = %_Arguments(index);
+    code = arguments[index];
     if (!%_IsSmi(code)) {
       code = TO_NUMBER(code);
     }
@@ -1013,8 +830,8 @@
 
 // ES6 Draft 03-17-2015, section 21.1.2.4
 function StringRaw(callSite) {
-  // TODO(caitp): Use rest parameters when implemented
-  var numberOfSubstitutions = %_ArgumentsLength();
+  "use strict";
+  var numberOfSubstitutions = arguments.length;
   var cooked = TO_OBJECT(callSite);
   var raw = TO_OBJECT(cooked.raw);
   var literalSegments = TO_LENGTH(raw.length);
@@ -1024,7 +841,7 @@
 
   for (var i = 1; i < literalSegments; ++i) {
     if (i < numberOfSubstitutions) {
-      result += TO_STRING(%_Arguments(i));
+      result += TO_STRING(arguments[i]);
     }
     result += TO_STRING(raw[i]);
   }
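Note: StringRaw interleaves the raw template segments with the substitutions read off the arguments object:

String.raw`a\n${1 + 1}b`;            // "a\n2b" with a literal backslash-n
String.raw({raw: ["x", "z"]}, "y");  // "xyz": any object with a .raw works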
@@ -1058,11 +875,11 @@
   "concat", StringConcat,
   "endsWith", StringEndsWith,
   "includes", StringIncludes,
-  "indexOf", StringIndexOfJS,
-  "lastIndexOf", StringLastIndexOfJS,
+  "indexOf", StringIndexOf,
+  "lastIndexOf", StringLastIndexOf,
   "localeCompare", StringLocaleCompareJS,
   "match", StringMatchJS,
-  "normalize", StringNormalizeJS,
+  "normalize", StringNormalize,
   "repeat", StringRepeat,
   "replace", StringReplace,
   "search", StringSearch,
@@ -1098,9 +915,10 @@
 // Exports
 
 utils.Export(function(to) {
+  to.ExpandReplacement = ExpandReplacement;
   to.StringCharAt = StringCharAtJS;
-  to.StringIndexOf = StringIndexOfJS;
-  to.StringLastIndexOf = StringLastIndexOfJS;
+  to.StringIndexOf = StringIndexOf;
+  to.StringLastIndexOf = StringLastIndexOf;
   to.StringMatch = StringMatchJS;
   to.StringReplace = StringReplace;
   to.StringSlice = StringSlice;
diff --git a/src/js/symbol.js b/src/js/symbol.js
index 5be6e01..ae54369 100644
--- a/src/js/symbol.js
+++ b/src/js/symbol.js
@@ -11,7 +11,6 @@
 // -------------------------------------------------------------------
 // Imports
 
-var GlobalObject = global.Object;
 var GlobalSymbol = global.Symbol;
 var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
 var isConcatSpreadableSymbol =
@@ -73,22 +72,11 @@
   return %SymbolRegistry().keyFor[symbol];
 }
 
-
-// ES6 19.1.2.8
-function ObjectGetOwnPropertySymbols(obj) {
-  obj = TO_OBJECT(obj);
-
-  return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_STRINGS);
-}
-
 // -------------------------------------------------------------------
 
-%FunctionSetPrototype(GlobalSymbol, new GlobalObject());
-
 utils.InstallConstants(GlobalSymbol, [
-  // TODO(rossberg): expose when implemented.
-  // "hasInstance", hasInstanceSymbol,
-  // "isConcatSpreadable", isConcatSpreadableSymbol,
+  "hasInstance", hasInstanceSymbol,
+  "isConcatSpreadable", isConcatSpreadableSymbol,
   "iterator", iteratorSymbol,
   // TODO(yangguo): expose when implemented.
   // "match", matchSymbol,
@@ -108,8 +96,6 @@
 ]);
 
 %AddNamedProperty(
-    GlobalSymbol.prototype, "constructor", GlobalSymbol, DONT_ENUM);
-%AddNamedProperty(
     GlobalSymbol.prototype, toStringTagSymbol, "Symbol", DONT_ENUM | READ_ONLY);
 
 utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM | READ_ONLY, [
@@ -121,10 +107,6 @@
   "valueOf", SymbolValueOf
 ]);
 
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
-  "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
-]);
-
 // -------------------------------------------------------------------
 // Exports
 
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index fd668a5..3d500a3 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -300,8 +300,10 @@
   var newLength = endInt - beginInt;
   var beginByteOffset =
       %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
-  return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
-                                 beginByteOffset, newLength, true);
+  // BUG(v8:4665): For web compatibility, subarray needs to always build an
+  // instance of the default constructor.
+  // TODO(littledan): Switch to the standard or standardize the fix
+  return new GlobalNAME(%TypedArrayGetBuffer(this), beginByteOffset, newLength);
 }
 endmacro
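Note: effect of the v8:4665 workaround: subarray constructs via the default constructor even for subclasses, rather than consulting Symbol.species (illustrative):

class MyBytes extends Uint8Array {}
var sub = new MyBytes(8).subarray(0, 4);
sub instanceof Uint8Array;  // true
sub instanceof MyBytes;     // false under this workaround; the species
                            // path would have produced a MyBytes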
 
@@ -460,6 +462,7 @@
       return;
   }
 }
+%FunctionSetLength(TypedArraySet, 1);
 
 function TypedArrayGetToStringTag() {
   if (!%_IsTypedArray(this)) return;
@@ -564,22 +567,20 @@
 
 
 function TypedArrayComparefn(x, y) {
-  if (IsNaN(x) && IsNaN(y)) {
-    return IsNaN(y) ? 0 : 1;
+  if (x === 0 && x === y) {
+    x = 1 / x;
+    y = 1 / y;
   }
-  if (IsNaN(x)) {
+  if (x < y) {
+    return -1;
+  } else if (x > y) {
+    return 1;
+  } else if (IsNaN(x) && IsNaN(y)) {
+    return IsNaN(y) ? 0 : 1;
+  } else if (IsNaN(x)) {
     return 1;
   }
-  if (x === 0 && x === y) {
-    if (%_IsMinusZero(x)) {
-      if (!%_IsMinusZero(y)) {
-        return -1;
-      }
-    } else if (%_IsMinusZero(y)) {
-      return 1;
-    }
-  }
-  return x - y;
+  return 0;
 }
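Note: the rewritten default comparator keeps the required total order: negative zero sorts before positive zero (via the 1/x trick) and NaNs sort to the end:

new Float64Array([NaN, 1, -0, 0]).sort();
// Float64Array [-0, 0, 1, NaN]: 1/-0 === -Infinity orders -0 first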
 
 
@@ -614,7 +615,7 @@
   var length = %_TypedArrayGetLength(this);
 
   return InnerArrayLastIndexOf(this, element, index, length,
-                        %_ArgumentsLength());
+                               arguments.length);
 }
 %FunctionSetLength(TypedArrayLastIndexOf, 1);
 
@@ -678,7 +679,7 @@
 
   var length = %_TypedArrayGetLength(this);
   return InnerArrayReduce(callback, current, this, length,
-                          %_ArgumentsLength());
+                          arguments.length);
 }
 %FunctionSetLength(TypedArrayReduce, 1);
 
@@ -689,7 +690,7 @@
 
   var length = %_TypedArrayGetLength(this);
   return InnerArrayReduceRight(callback, current, this, length,
-                               %_ArgumentsLength());
+                               arguments.length);
 }
 %FunctionSetLength(TypedArrayReduceRight, 1);
 
@@ -750,10 +751,10 @@
 
 // ES6 draft 08-24-14, section 22.2.2.2
 function TypedArrayOf() {
-  var length = %_ArgumentsLength();
+  var length = arguments.length;
   var array = TypedArrayCreate(this, length);
   for (var i = 0; i < length; i++) {
-    array[i] = %_Arguments(i);
+    array[i] = arguments[i];
   }
   return array;
 }
@@ -846,36 +847,6 @@
 
 // --------------------------- DataView -----------------------------
 
-function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
-  if (IS_UNDEFINED(new.target)) {
-    throw MakeTypeError(kConstructorNotFunction, "DataView");
-  }
-
-  // TODO(binji): support SharedArrayBuffers?
-  if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
-  if (!IS_UNDEFINED(byteOffset)) {
-    byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
-  }
-  if (!IS_UNDEFINED(byteLength)) {
-    byteLength = TO_INTEGER(byteLength);
-  }
-
-  var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
-
-  var offset = IS_UNDEFINED(byteOffset) ?  0 : byteOffset;
-  if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
-
-  var length = IS_UNDEFINED(byteLength)
-      ? bufferByteLength - offset
-      : byteLength;
-  if (length < 0 || offset + length > bufferByteLength) {
-    throw new MakeRangeError(kInvalidDataViewLength);
-  }
-  var result = %NewObject(GlobalDataView, new.target);
-  %_DataViewInitialize(result, buffer, offset, length);
-  return result;
-}
-
 function DataViewGetBufferJS() {
   if (!IS_DATAVIEW(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
@@ -917,26 +888,27 @@
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         'DataView.getTYPENAME', this);
   }
-  if (%_ArgumentsLength() < 1) throw MakeTypeError(kInvalidArgument);
+  if (arguments.length < 1) throw MakeTypeError(kInvalidArgument);
   offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
   return %DataViewGetTYPENAME(this, offset, !!little_endian);
 }
+%FunctionSetLength(DataViewGetTYPENAMEJS, 1);
 
 function DataViewSetTYPENAMEJS(offset, value, little_endian) {
   if (!IS_DATAVIEW(this)) {
     throw MakeTypeError(kIncompatibleMethodReceiver,
                         'DataView.setTYPENAME', this);
   }
-  if (%_ArgumentsLength() < 2) throw MakeTypeError(kInvalidArgument);
+  if (arguments.length < 2) throw MakeTypeError(kInvalidArgument);
   offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
   %DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
 }
+%FunctionSetLength(DataViewSetTYPENAMEJS, 2);
 endmacro
 
 DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
 
 // Setup the DataView constructor.
-%SetCode(GlobalDataView, DataViewConstructor);
 %FunctionSetPrototype(GlobalDataView, new GlobalObject);
 
 // Set up constructor property on the DataView prototype.
diff --git a/src/js/v8natives.js b/src/js/v8natives.js
index 26447da..5e1a825 100644
--- a/src/js/v8natives.js
+++ b/src/js/v8natives.js
@@ -10,7 +10,6 @@
 // Imports
 
 var GlobalArray = global.Array;
-var GlobalBoolean = global.Boolean;
 var GlobalNumber = global.Number;
 var GlobalObject = global.Object;
 var InternalArray = utils.InternalArray;
@@ -24,7 +23,6 @@
 var ObserveBeginPerformSplice;
 var ObserveEndPerformSplice;
 var ObserveEnqueueSpliceRecord;
-var SameValue = utils.ImportNow("SameValue");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -545,17 +543,17 @@
     if ((IsGenericDescriptor(desc) ||
          IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
         (!desc.hasEnumerable() ||
-         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
+         %SameValue(desc.isEnumerable(), current.isEnumerable())) &&
         (!desc.hasConfigurable() ||
-         SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+         %SameValue(desc.isConfigurable(), current.isConfigurable())) &&
         (!desc.hasWritable() ||
-         SameValue(desc.isWritable(), current.isWritable())) &&
+         %SameValue(desc.isWritable(), current.isWritable())) &&
         (!desc.hasValue() ||
-         SameValue(desc.getValue(), current.getValue())) &&
+         %SameValue(desc.getValue(), current.getValue())) &&
         (!desc.hasGetter() ||
-         SameValue(desc.getGet(), current.getGet())) &&
+         %SameValue(desc.getGet(), current.getGet())) &&
         (!desc.hasSetter() ||
-         SameValue(desc.getSet(), current.getSet()))) {
+         %SameValue(desc.getSet(), current.getSet()))) {
       return true;
     }
     if (!current.isConfigurable()) {
@@ -594,7 +592,7 @@
             }
           }
           if (!currentIsWritable && desc.hasValue() &&
-              !SameValue(desc.getValue(), current.getValue())) {
+              !%SameValue(desc.getValue(), current.getValue())) {
             if (should_throw) {
               throw MakeTypeError(kRedefineDisallowed, p);
             } else {
@@ -605,14 +603,14 @@
         // Step 11
         if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
           if (desc.hasSetter() &&
-              !SameValue(desc.getSet(), current.getSet())) {
+              !%SameValue(desc.getSet(), current.getSet())) {
             if (should_throw) {
               throw MakeTypeError(kRedefineDisallowed, p);
             } else {
               return false;
             }
           }
-          if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
+          if (desc.hasGetter() && !%SameValue(desc.getGet(),current.getGet())) {
             if (should_throw) {
               throw MakeTypeError(kRedefineDisallowed, p);
             } else {
@@ -772,19 +770,6 @@
 }
 
 
-// ES6 section 19.1.2.6
-function ObjectGetOwnPropertyDescriptor(obj, p) {
-  return %GetOwnProperty(obj, p);
-}
-
-
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
-  obj = TO_OBJECT(obj);
-  return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_SYMBOLS);
-}
-
-
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
   // The new pure-C++ implementation doesn't support O.o.
@@ -802,11 +787,6 @@
 }
 
 
-function GetOwnEnumerablePropertyNames(object) {
-  return %GetOwnPropertyKeys(object, PROPERTY_FILTER_ONLY_ENUMERABLE);
-}
-
-
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
   // The new pure-C++ implementation doesn't support O.o.
@@ -816,7 +796,7 @@
       throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
     }
     var props = TO_OBJECT(properties);
-    var names = GetOwnEnumerablePropertyNames(props);
+    var names = %GetOwnPropertyKeys(props, PROPERTY_FILTER_ONLY_ENUMERABLE);
     var descriptors = new InternalArray();
     for (var i = 0; i < names.length; i++) {
       descriptors.push(ToPropertyDescriptor(props[names[i]]));
@@ -889,65 +869,13 @@
   "defineProperties", ObjectDefineProperties,
   "getPrototypeOf", ObjectGetPrototypeOf,
   "setPrototypeOf", ObjectSetPrototypeOf,
-  "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
-  "getOwnPropertyNames", ObjectGetOwnPropertyNames,
   // getOwnPropertySymbols is added in symbol.js.
-  "is", SameValue,  // ECMA-262, Edition 6, section 19.1.2.10
+  // is is added in bootstrapper.cc.
   // deliverChangeRecords, getNotifier, observe and unobserve are added
   // in object-observe.js.
 ]);
 
 
-// ----------------------------------------------------------------------------
-// Boolean
-
-function BooleanConstructor(x) {
-  // TODO(bmeurer): Move this to toplevel.
-  "use strict";
-  if (!IS_UNDEFINED(new.target)) {
-    %_SetValueOf(this, TO_BOOLEAN(x));
-  } else {
-    return TO_BOOLEAN(x);
-  }
-}
-
-
-function BooleanToString() {
-  // NOTE: Both Boolean objects and values can enter here as
-  // 'this'. This is not as dictated by ECMA-262.
-  var b = this;
-  if (!IS_BOOLEAN(b)) {
-    if (!IS_BOOLEAN_WRAPPER(b)) {
-      throw MakeTypeError(kNotGeneric, 'Boolean.prototype.toString');
-    }
-    b = %_ValueOf(b);
-  }
-  return b ? 'true' : 'false';
-}
-
-
-function BooleanValueOf() {
-  // NOTE: Both Boolean objects and values can enter here as
-  // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
-    throw MakeTypeError(kNotGeneric, 'Boolean.prototype.valueOf');
-  }
-  return %_ValueOf(this);
-}
-
-
-// ----------------------------------------------------------------------------
-
-%SetCode(GlobalBoolean, BooleanConstructor);
-%FunctionSetPrototype(GlobalBoolean, new GlobalBoolean(false));
-%AddNamedProperty(GlobalBoolean.prototype, "constructor", GlobalBoolean,
-                  DONT_ENUM);
-
-utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
-  "toString", BooleanToString,
-  "valueOf", BooleanValueOf
-]);
-
 
 // ----------------------------------------------------------------------------
 // Number
diff --git a/src/parsing/json-parser.h b/src/json-parser.h
similarity index 96%
rename from src/parsing/json-parser.h
rename to src/json-parser.h
index e23c733..efd3c04 100644
--- a/src/parsing/json-parser.h
+++ b/src/json-parser.h
@@ -2,18 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_PARSING_JSON_PARSER_H_
-#define V8_PARSING_JSON_PARSER_H_
+#ifndef V8_JSON_PARSER_H_
+#define V8_JSON_PARSER_H_
 
 #include "src/char-predicates.h"
 #include "src/conversions.h"
 #include "src/debug/debug.h"
 #include "src/factory.h"
+#include "src/field-type.h"
 #include "src/messages.h"
 #include "src/parsing/scanner.h"
 #include "src/parsing/token.h"
 #include "src/transitions.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -128,7 +128,9 @@
   }
 
   Handle<String> ParseJsonInternalizedString() {
-    return ScanJsonString<true>();
+    Handle<String> result = ScanJsonString<true>();
+    if (result.is_null()) return result;
+    return factory()->InternalizeString(result);
   }
 
   template <bool is_internalized>
@@ -217,11 +219,12 @@
     // Parse failed. Current character is the unexpected token.
     Factory* factory = this->factory();
     MessageTemplate::Template message;
-    Handle<String> argument;
+    Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
+    Handle<Object> arg2;
 
     switch (c0_) {
       case kEndOfString:
-        message = MessageTemplate::kUnexpectedEOS;
+        message = MessageTemplate::kJsonParseUnexpectedEOS;
         break;
       case '-':
       case '0':
@@ -234,14 +237,15 @@
       case '7':
       case '8':
       case '9':
-        message = MessageTemplate::kUnexpectedTokenNumber;
+        message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
         break;
       case '"':
-        message = MessageTemplate::kUnexpectedTokenString;
+        message = MessageTemplate::kJsonParseUnexpectedTokenString;
         break;
       default:
-        message = MessageTemplate::kUnexpectedToken;
-        argument = factory->LookupSingleCharacterStringFromCode(c0_);
+        message = MessageTemplate::kJsonParseUnexpectedToken;
+        arg2 = arg1;
+        arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
         break;
     }
 
@@ -250,7 +254,7 @@
     // separated source file.
     isolate()->debug()->OnCompileError(script);
     MessageLocation location(script, position_, position_ + 1);
-    Handle<Object> error = factory->NewSyntaxError(message, argument);
+    Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
     return isolate()->template Throw<Object>(error, &location);
   }
   return result;
@@ -416,7 +420,7 @@
               !target->instance_descriptors()
                    ->GetFieldType(descriptor)
                    ->NowContains(value)) {
-            Handle<HeapType> value_type(
+            Handle<FieldType> value_type(
                 value->OptimalType(isolate(), expected_representation));
             Map::GeneralizeFieldType(target, descriptor,
                                      expected_representation, value_type);
@@ -839,4 +843,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_PARSING_JSON_PARSER_H_
+#endif  // V8_JSON_PARSER_H_
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 5c0459e..d97ca2b 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -567,8 +567,7 @@
     Handle<FixedArray> contents;
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate_, contents,
-        JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS),
-        EXCEPTION);
+        JSReceiver::GetKeys(object, OWN_ONLY, ENUMERABLE_STRINGS), EXCEPTION);
 
     for (int i = 0; i < contents->length(); i++) {
       Object* key = contents->get(i);
diff --git a/src/key-accumulator.cc b/src/key-accumulator.cc
index e7a9c3c..c2c4996 100644
--- a/src/key-accumulator.cc
+++ b/src/key-accumulator.cc
@@ -29,6 +29,9 @@
   // Make sure we have all the lengths collected.
   NextPrototype();
 
+  if (type_ == OWN_ONLY && !ownProxyKeys_.is_null()) {
+    return ownProxyKeys_;
+  }
   // Assemble the result array by first adding the element keys and then the
   // property keys. We use the total number of String + Symbol keys per level in
   // |level_lengths_| and the available element keys in the corresponding bucket
@@ -260,7 +263,13 @@
   // Proxies define a complete list of keys with no distinction of
   // elements and properties, which breaks the normal assumption for the
   // KeyAccumulator.
-  AddKeys(keys, PROXY_MAGIC);
+  if (type_ == OWN_ONLY) {
+    ownProxyKeys_ = keys;
+    level_string_length_ = keys->length();
+    length_ = level_string_length_;
+  } else {
+    AddKeys(keys, PROXY_MAGIC);
+  }
   // Invert the current length to indicate a present proxy, so we can ignore
   // element keys for this level. Otherwise we would not fully respect the order
   // given by the proxy.
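Note: with the OWN_ONLY short-circuit above, a proxy's own-keys query returns the trap result verbatim instead of routing it through the element/property buckets, preserving trap order (illustrative JS view of the C++ change):

var p = new Proxy({}, {
  ownKeys() { return ["b", "a"]; },
  getOwnPropertyDescriptor() {
    return {value: 0, enumerable: true, configurable: true};
  }
});
Object.keys(p);  // ["b", "a"]: exactly the ownKeys trap order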
diff --git a/src/key-accumulator.h b/src/key-accumulator.h
index 8a4d886..9daee10 100644
--- a/src/key-accumulator.h
+++ b/src/key-accumulator.h
@@ -31,17 +31,16 @@
 // are more compact and allow for reasonably fast includes check.
 class KeyAccumulator final BASE_EMBEDDED {
  public:
-  KeyAccumulator(Isolate* isolate, PropertyFilter filter)
-      : isolate_(isolate), filter_(filter) {}
+  KeyAccumulator(Isolate* isolate, KeyCollectionType type,
+                 PropertyFilter filter)
+      : isolate_(isolate), type_(type), filter_(filter) {}
   ~KeyAccumulator();
 
   bool AddKey(uint32_t key);
-  bool AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
-  bool AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
-  void AddKeys(Handle<FixedArray> array,
-               AddKeyConversion convert = DO_NOT_CONVERT);
-  void AddKeys(Handle<JSObject> array,
-               AddKeyConversion convert = DO_NOT_CONVERT);
+  bool AddKey(Object* key, AddKeyConversion convert);
+  bool AddKey(Handle<Object> key, AddKeyConversion convert);
+  void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
+  void AddKeys(Handle<JSObject> array, AddKeyConversion convert);
   void AddKeysFromProxy(Handle<JSObject> array);
   Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
   void AddElementKeysFromInterceptor(Handle<JSObject> array);
@@ -61,6 +60,7 @@
   void SortCurrentElementsListRemoveDuplicates();
 
   Isolate* isolate_;
+  KeyCollectionType type_;
   PropertyFilter filter_;
   // |elements_| contains the sorted element keys (indices) per level.
   std::vector<std::vector<uint32_t>*> elements_;
@@ -73,6 +73,7 @@
   // |symbol_properties_| contains the unique Symbol property keys for all
   // levels in insertion order per level.
   Handle<OrderedHashSet> symbol_properties_;
+  Handle<FixedArray> own_proxy_keys_;
   // |length_| keeps track of the total number of all element and property keys.
   int length_ = 0;
   // |level_string_length_| keeps track of the number of String keys in the current
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index e8c1557..6902504 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -193,5 +193,10 @@
   static const char dummy[] = "dummy";
   return dummy;
 }
+
+size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
+  return static_cast<size_t>(thread_pool_size_);
+}
+
 }  // namespace platform
 }  // namespace v8
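
The override added above reports the platform's configured pool size so callers can scale concurrent work. As a hedged aside (hypothetical embedder helper; only the override itself appears in this diff), a reasonable way to pick that size is:

#include <algorithm>
#include <cstddef>
#include <thread>

// Hypothetical sizing helper: use all hardware threads but one for
// background work, and never report zero workers.
size_t SuggestBackgroundThreadCount() {
  unsigned hw = std::thread::hardware_concurrency();  // 0 when unknown
  return static_cast<size_t>(std::max(1u, hw > 1 ? hw - 1 : 1u));
}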
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index 8bdda95..2c428ee 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -34,6 +34,7 @@
   bool PumpMessageLoop(v8::Isolate* isolate);
 
   // v8::Platform implementation.
+  size_t NumberOfAvailableBackgroundThreads() override;
   void CallOnBackgroundThread(Task* task,
                               ExpectedRuntime expected_runtime) override;
   void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
diff --git a/src/list.h b/src/list.h
index 8b8a5dd..83e5f45 100644
--- a/src/list.h
+++ b/src/list.h
@@ -207,15 +207,13 @@
 
 
 class Map;
-template<class> class TypeImpl;
-struct HeapTypeConfig;
-typedef TypeImpl<HeapTypeConfig> HeapType;
+class FieldType;
 class Code;
 template<typename T> class Handle;
 typedef List<Map*> MapList;
 typedef List<Code*> CodeList;
 typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<HeapType> > TypeHandleList;
+typedef List<Handle<FieldType> > TypeHandleList;
 typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
diff --git a/src/log-inl.h b/src/log-inl.h
index d47a24b..765398f 100644
--- a/src/log-inl.h
+++ b/src/log-inl.h
@@ -38,19 +38,6 @@
       isolate->event_logger()(name, se);
     }
   }
-  if (expose_to_api) {
-    if (se == START) {
-      TRACE_EVENT_BEGIN0("v8", name);
-    } else {
-      TRACE_EVENT_END0("v8", name);
-    }
-  } else {
-    if (se == START) {
-      TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
-    } else {
-      TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
-    }
-  }
 }
 }  // namespace internal
 }  // namespace v8
diff --git a/src/log.cc b/src/log.cc
index a10d962..cbdd9dd 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -934,6 +934,7 @@
 
 void Logger::EnterExternal(Isolate* isolate) {
   LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
+  TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   DCHECK(isolate->current_vm_state() == JS);
   isolate->set_current_vm_state(EXTERNAL);
 }
@@ -941,6 +942,7 @@
 
 void Logger::LeaveExternal(Isolate* isolate) {
   LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
+  TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   DCHECK(isolate->current_vm_state() == EXTERNAL);
   isolate->set_current_vm_state(JS);
 }
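
These two hunks emit the BEGIN event on entering external code and the matching END event on leaving it. A self-contained stand-in (plain C++, not V8's tracing macros) for that pairing discipline:

#include <cstdio>

// The constructor plays the role of TRACE_EVENT_BEGIN0 and the destructor
// the matching TRACE_EVENT_END0, so the pair cannot get out of balance.
struct ScopedTraceEvent {
  explicit ScopedTraceEvent(const char* name) : name_(name) {
    std::printf("BEGIN %s\n", name_);
  }
  ~ScopedTraceEvent() { std::printf("END %s\n", name_); }
  const char* name_;
};

int main() {
  ScopedTraceEvent trace("V8.External");  // BEGIN here, END at scope exit
  return 0;
}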
@@ -1497,11 +1499,7 @@
   msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
   if (sample->has_external_callback) {
     msg.Append(",1,");
-#if USES_FUNCTION_DESCRIPTORS
-    msg.AppendAddress(*FUNCTION_ENTRYPOINT_ADDRESS(sample->external_callback));
-#else
-    msg.AppendAddress(sample->external_callback);
-#endif
+    msg.AppendAddress(sample->external_callback_entry);
   } else {
     msg.Append(",0,");
     msg.AppendAddress(sample->tos);
@@ -1712,6 +1710,9 @@
       CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
       Object* callback_obj = call_data->callback();
       Address entry_point = v8::ToCData<Address>(callback_obj);
+#if USES_FUNCTION_DESCRIPTORS
+      entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
+#endif
       PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
     }
   } else {
@@ -1749,16 +1750,22 @@
   HeapIterator iterator(heap);
   DisallowHeapAllocation no_gc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    if (!obj->IsExecutableAccessorInfo()) continue;
-    ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
+    if (!obj->IsAccessorInfo()) continue;
+    AccessorInfo* ai = AccessorInfo::cast(obj);
     if (!ai->name()->IsName()) continue;
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     Name* name = Name::cast(ai->name());
     if (getter_entry != 0) {
+#if USES_FUNCTION_DESCRIPTORS
+      getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(getter_entry);
+#endif
       PROFILE(isolate_, GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
+#if USES_FUNCTION_DESCRIPTORS
+      setter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(setter_entry);
+#endif
       PROFILE(isolate_, SetterCallbackEvent(name, setter_entry));
     }
   }
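
The three USES_FUNCTION_DESCRIPTORS hunks above exist because some ABIs (64-bit PowerPC ELFv1 and AIX, for example) make a C function pointer point at a descriptor rather than at code, so the profiler must dereference once to reach the real entry point. Hedged illustration with a hypothetical layout:

// On descriptor-based ABIs the first word of the descriptor is the actual
// code address; that is what FUNCTION_ENTRYPOINT_ADDRESS extracts for the
// profiler events above.
struct FunctionDescriptor {
  void* entry_point;  // address of the first instruction
  void* toc;          // table-of-contents (GOT) base for the callee
  void* environment;  // language-specific environment pointer
};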
diff --git a/src/log.h b/src/log.h
index 064115b..1a454da 100644
--- a/src/log.h
+++ b/src/log.h
@@ -410,11 +410,13 @@
   friend class CpuProfiler;
 };
 
-
 #define TIMER_EVENTS_LIST(V)    \
   V(RecompileSynchronous, true) \
   V(RecompileConcurrent, true)  \
   V(CompileFullCode, true)      \
+  V(OptimizeCode, true)         \
+  V(CompileCode, true)          \
+  V(DeoptimizeCode, true)       \
   V(Execute, true)              \
   V(External, true)             \
   V(IcMiss, false)
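
TIMER_EVENTS_LIST is an X-macro: each V(name, expose_to_api) entry is expanded once per use site, so adding OptimizeCode, CompileCode and DeoptimizeCode here generates the corresponding timer-event classes automatically. A minimal self-contained sketch of the pattern (illustrative list, not the one above):

#include <cstdio>

#define MY_EVENTS_LIST(V) \
  V(CompileCode, true)    \
  V(Execute, true)

// Expand the list into one string constant per entry...
#define DECLARE_NAME(name, expose_to_api) \
  static const char kName##name[] = "V8." #name;
MY_EVENTS_LIST(DECLARE_NAME)
#undef DECLARE_NAME

int main() {
  // ...and use the generated identifiers like ordinary constants.
  std::printf("%s %s\n", kNameCompileCode, kNameExecute);
  return 0;
}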
diff --git a/src/lookup.cc b/src/lookup.cc
index 48da4fa..bad5a20 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -7,6 +7,7 @@
 #include "src/bootstrapper.h"
 #include "src/deoptimizer.h"
 #include "src/elements.h"
+#include "src/field-type.h"
 #include "src/isolate-inl.h"
 
 namespace v8 {
@@ -52,7 +53,7 @@
   has_property_ = false;
 
   JSReceiver* holder = *holder_;
-  Map* map = *holder_map_;
+  Map* map = holder->map();
 
   // Perform lookup on current holder.
   state_ = LookupInHolder(map, holder);
@@ -73,10 +74,7 @@
     state_ = LookupInHolder(map, holder);
   } while (!IsFound());
 
-  if (holder != *holder_) {
-    holder_ = handle(holder, isolate_);
-    holder_map_ = handle(map, isolate_);
-  }
+  if (holder != *holder_) holder_ = handle(holder, isolate_);
 }
 
 
@@ -85,7 +83,6 @@
   interceptor_state_ = interceptor_state;
   property_details_ = PropertyDetails::Empty();
   holder_ = initial_holder_;
-  holder_map_ = handle(holder_->map(), isolate_);
   number_ = DescriptorArray::kNotFound;
   Next();
 }
@@ -122,9 +119,10 @@
 
 Handle<JSObject> LookupIterator::GetStoreTarget() const {
   if (receiver_->IsJSGlobalProxy()) {
-    PrototypeIterator iter(isolate(), receiver_);
-    if (iter.IsAtEnd()) return Handle<JSGlobalProxy>::cast(receiver_);
-    return PrototypeIterator::GetCurrent<JSGlobalObject>(iter);
+    Object* prototype = JSGlobalProxy::cast(*receiver_)->map()->prototype();
+    if (!prototype->IsNull()) {
+      return handle(JSGlobalObject::cast(prototype), isolate_);
+    }
   }
   return Handle<JSObject>::cast(receiver_);
 }
@@ -140,21 +138,57 @@
 void LookupIterator::ReloadPropertyInformation() {
   state_ = BEFORE_PROPERTY;
   interceptor_state_ = InterceptorState::kUninitialized;
-  state_ = LookupInHolder(*holder_map_, *holder_);
-  DCHECK(IsFound() || holder_map_->is_dictionary_map());
+  state_ = LookupInHolder(holder_->map(), *holder_);
+  DCHECK(IsFound() || !holder_->HasFastProperties());
 }
 
+bool LookupIterator::HolderIsInContextIndex(uint32_t index) const {
+  DisallowHeapAllocation no_gc;
 
-void LookupIterator::ReloadHolderMap() {
-  DCHECK_EQ(DATA, state_);
-  DCHECK(IsElement());
-  DCHECK(JSObject::cast(*holder_)->HasFixedTypedArrayElements());
-  if (*holder_map_ != holder_->map()) {
-    holder_map_ = handle(holder_->map(), isolate_);
+  Object* context = heap()->native_contexts_list();
+  while (!context->IsUndefined()) {
+    Context* current_context = Context::cast(context);
+    if (current_context->get(index) == *holder_) {
+      return true;
+    }
+    context = current_context->get(Context::NEXT_CONTEXT_LINK);
+  }
+  return false;
+}
+
+void LookupIterator::UpdateProtector() {
+  if (!FLAG_harmony_species) return;
+
+  if (IsElement()) return;
+  if (isolate_->bootstrapper()->IsActive()) return;
+  if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+
+  if (*name_ == *isolate_->factory()->constructor_string()) {
+    // Setting the constructor property could change an instance's @@species.
+    if (holder_->IsJSArray()) {
+      isolate_->CountUsage(
+          v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
+      isolate_->InvalidateArraySpeciesProtector();
+    } else if (holder_->map()->is_prototype_map()) {
+      // Overwriting the constructor of any realm's Array.prototype also
+      // requires invalidating the species protector.
+      if (HolderIsInContextIndex(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+        isolate_->CountUsage(v8::Isolate::UseCounterFeature::
+                                 kArrayPrototypeConstructorModified);
+        isolate_->InvalidateArraySpeciesProtector();
+      }
+    }
+  } else if (*name_ == *isolate_->factory()->species_symbol()) {
+    // Setting the Symbol.species property of any Array constructor
+    // invalidates the species protector.
+    if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
+      isolate_->CountUsage(
+          v8::Isolate::UseCounterFeature::kArraySpeciesModified);
+      isolate_->InvalidateArraySpeciesProtector();
+    }
   }
 }
 
-
 void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
   DCHECK(state_ == DATA || state_ == ACCESSOR);
   DCHECK(HolderIsReceiverOrHiddenPrototype());
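
HolderIsInContextIndex above walks the weak list of native contexts that the heap threads through a context slot. A simplified self-contained model of that traversal (hypothetical types, fixed-size slot array):

// The walk succeeds when some context's |index| slot holds the object
// being tested; contexts link to each other through a dedicated slot.
struct Ctx {
  const Ctx* next;      // stands in for Context::NEXT_CONTEXT_LINK
  const void* slots[8];
};

bool HolderIsInList(const Ctx* head, int index, const void* holder) {
  for (const Ctx* c = head; c != nullptr; c = c->next) {
    if (c->slots[index] == holder) return true;
  }
  return false;
}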
@@ -162,25 +196,38 @@
   Handle<JSObject> holder = GetHolder<JSObject>();
 
   if (IsElement()) {
-    ElementsKind kind = holder_map_->elements_kind();
+    ElementsKind kind = holder->GetElementsKind();
     ElementsKind to = value->OptimalElementsKind();
     if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
     to = GetMoreGeneralElementsKind(kind, to);
-    JSObject::TransitionElementsKind(holder, to);
-    holder_map_ = handle(holder->map(), isolate_);
+
+    if (kind != to) {
+      JSObject::TransitionElementsKind(holder, to);
+    }
 
     // Copy the backing store if it is copy-on-write.
     if (IsFastSmiOrObjectElementsKind(to)) {
       JSObject::EnsureWritableFastElements(holder);
     }
-
-  } else {
-    if (holder_map_->is_dictionary_map()) return;
-    holder_map_ =
-        Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
+    return;
   }
 
-  JSObject::MigrateToMap(holder, holder_map_);
+  if (!holder->HasFastProperties()) return;
+
+  Handle<Map> old_map(holder->map(), isolate_);
+  Handle<Map> new_map =
+      Map::PrepareForDataProperty(old_map, descriptor_number(), value);
+
+  if (old_map.is_identical_to(new_map)) {
+    // Update the property details if the representation was None.
+    if (representation().IsNone()) {
+      property_details_ =
+          new_map->instance_descriptors()->GetDetails(descriptor_number());
+    }
+    return;
+  }
+
+  JSObject::MigrateToMap(holder, new_map);
   ReloadPropertyInformation();
 }
 
@@ -196,16 +243,16 @@
     Handle<FixedArrayBase> elements(holder->elements());
     holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
                                                attributes);
-  } else if (holder_map_->is_dictionary_map()) {
+  } else if (!holder->HasFastProperties()) {
     PropertyDetails details(attributes, v8::internal::DATA, 0,
                             PropertyCellType::kMutable);
     JSObject::SetNormalizedProperty(holder, name(), value, details);
   } else {
-    holder_map_ = Map::ReconfigureExistingProperty(
-        holder_map_, descriptor_number(), i::kData, attributes);
-    holder_map_ =
-        Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
-    JSObject::MigrateToMap(holder, holder_map_);
+    Handle<Map> old_map(holder->map(), isolate_);
+    Handle<Map> new_map = Map::ReconfigureExistingProperty(
+        old_map, descriptor_number(), i::kData, attributes);
+    new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+    JSObject::MigrateToMap(holder, new_map);
   }
 
   ReloadPropertyInformation();
@@ -218,54 +265,66 @@
 #endif
 }
 
-
+// Can only be called when the receiver is a JSObject. JSProxy has to be handled
+// via a trap. Adding properties to primitive values is not observable.
 void LookupIterator::PrepareTransitionToDataProperty(
-    Handle<Object> value, PropertyAttributes attributes,
-    Object::StoreFromKeyed store_mode) {
+    Handle<JSObject> receiver, Handle<Object> value,
+    PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
+  DCHECK(receiver.is_identical_to(GetStoreTarget()));
   if (state_ == TRANSITION) return;
   DCHECK(state_ != LookupIterator::ACCESSOR ||
          (GetAccessors()->IsAccessorInfo() &&
           AccessorInfo::cast(*GetAccessors())->is_special_data_property()));
   DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
   DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
-  // Can only be called when the receiver is a JSObject. JSProxy has to be
-  // handled via a trap. Adding properties to primitive values is not
-  // observable.
-  Handle<JSObject> receiver = GetStoreTarget();
 
-  if (!isolate()->IsInternallyUsedPropertyName(name()) &&
-      !receiver->map()->is_extensible()) {
+  Handle<Map> map(receiver->map(), isolate_);
+
+  // Dictionary maps can always have additional data properties.
+  if (map->is_dictionary_map()) {
+    state_ = TRANSITION;
+    if (map->IsJSGlobalObjectMap()) {
+      // Install a property cell.
+      auto cell = JSGlobalObject::EnsurePropertyCell(
+          Handle<JSGlobalObject>::cast(receiver), name());
+      DCHECK(cell->value()->IsTheHole());
+      transition_ = cell;
+    } else {
+      transition_ = map;
+    }
     return;
   }
 
-  auto transition = Map::TransitionToDataProperty(
-      handle(receiver->map(), isolate_), name_, value, attributes, store_mode);
+  Handle<Map> transition =
+      Map::TransitionToDataProperty(map, name_, value, attributes, store_mode);
   state_ = TRANSITION;
   transition_ = transition;
 
-  if (receiver->IsJSGlobalObject()) {
-    // Install a property cell.
-    InternalizeName();
-    auto cell = JSGlobalObject::EnsurePropertyCell(
-        Handle<JSGlobalObject>::cast(receiver), name());
-    DCHECK(cell->value()->IsTheHole());
-    transition_ = cell;
-  } else if (!transition->is_dictionary_map()) {
+  if (!transition->is_dictionary_map()) {
     property_details_ = transition->GetLastDescriptorDetails();
     has_property_ = true;
   }
 }
 
-
-void LookupIterator::ApplyTransitionToDataProperty() {
+void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
   DCHECK_EQ(TRANSITION, state_);
 
-  Handle<JSObject> receiver = GetStoreTarget();
+  DCHECK(receiver.is_identical_to(GetStoreTarget()));
+
   if (receiver->IsJSGlobalObject()) return;
   holder_ = receiver;
-  holder_map_ = transition_map();
-  JSObject::MigrateToMap(receiver, holder_map_);
-  ReloadPropertyInformation();
+  Handle<Map> transition = transition_map();
+  bool simple_transition = transition->GetBackPointer() == receiver->map();
+  JSObject::MigrateToMap(receiver, transition);
+
+  if (simple_transition) {
+    int number = transition->LastAdded();
+    number_ = static_cast<uint32_t>(number);
+    property_details_ = transition->GetLastDescriptorDetails();
+    state_ = DATA;
+  } else {
+    ReloadPropertyInformation();
+  }
 }
 
 
@@ -283,7 +342,6 @@
     if (holder->HasFastProperties()) {
       JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
                                     "DeletingProperty");
-      holder_map_ = handle(holder->map(), isolate_);
       ReloadPropertyInformation();
     }
     // TODO(verwaest): Get rid of the name_ argument.
@@ -292,6 +350,7 @@
       JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
     }
   }
+  state_ = NOT_FOUND;
 }
 
 
@@ -306,14 +365,14 @@
 
   if (!IsElement() && !receiver->map()->is_dictionary_map()) {
     holder_ = receiver;
-    holder_map_ = Map::TransitionToAccessorProperty(
-        handle(receiver->map(), isolate_), name_, component, accessor,
-        attributes);
-    JSObject::MigrateToMap(receiver, holder_map_);
+    Handle<Map> old_map(receiver->map(), isolate_);
+    Handle<Map> new_map = Map::TransitionToAccessorProperty(
+        old_map, name_, component, accessor, attributes);
+    JSObject::MigrateToMap(receiver, new_map);
 
     ReloadPropertyInformation();
 
-    if (!holder_map_->is_dictionary_map()) return;
+    if (!new_map->is_dictionary_map()) return;
   }
 
   Handle<AccessorPair> pair;
@@ -383,34 +442,29 @@
     JSObject::ReoptimizeIfPrototype(receiver);
   }
 
-  holder_map_ = handle(receiver->map(), isolate_);
   ReloadPropertyInformation();
 }
 
 
 bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
   DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
-  return InternalHolderIsReceiverOrHiddenPrototype();
-}
-
-bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
   // Optimization that only works if configuration_ is not mutable.
   if (!check_prototype_chain()) return true;
   DisallowHeapAllocation no_gc;
   if (!receiver_->IsJSReceiver()) return false;
-  Object* current = *receiver_;
-  JSReceiver* holder = *holder_;
+  JSReceiver* current = JSReceiver::cast(*receiver_);
+  JSReceiver* object = *holder_;
+  if (current == object) return true;
+  if (!current->map()->has_hidden_prototype()) return false;
   // JSProxy do not occur as hidden prototypes.
-  if (current->IsJSProxy()) {
-    return JSReceiver::cast(current) == holder;
-  }
+  if (current->IsJSProxy()) return false;
   PrototypeIterator iter(isolate(), current,
-                         PrototypeIterator::START_AT_RECEIVER);
-  do {
-    if (iter.GetCurrent<JSReceiver>() == holder) return true;
-    DCHECK(!current->IsJSProxy());
+                         PrototypeIterator::START_AT_PROTOTYPE,
+                         PrototypeIterator::END_AT_NON_HIDDEN);
+  while (!iter.IsAtEnd()) {
+    if (iter.GetCurrent<JSReceiver>() == object) return true;
     iter.Advance();
-  } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+  }
   return false;
 }
 
@@ -419,30 +473,22 @@
   Object* result = NULL;
   if (IsElement()) {
     Handle<JSObject> holder = GetHolder<JSObject>();
-    // TODO(verwaest): Optimize.
-    if (holder->IsStringObjectWithCharacterAt(index_)) {
-      Handle<JSValue> js_value = Handle<JSValue>::cast(holder);
-      Handle<String> string(String::cast(js_value->value()));
-      return factory()->LookupSingleCharacterStringFromCode(
-          String::Flatten(string)->Get(index_));
-    }
-
     ElementsAccessor* accessor = holder->GetElementsAccessor();
-    return accessor->Get(handle(holder->elements()), number_);
-  } else if (holder_map_->IsJSGlobalObjectMap()) {
+    return accessor->Get(holder, number_);
+  } else if (holder_->IsJSGlobalObject()) {
     Handle<JSObject> holder = GetHolder<JSObject>();
     result = holder->global_dictionary()->ValueAt(number_);
     DCHECK(result->IsPropertyCell());
     result = PropertyCell::cast(result)->value();
-  } else if (holder_map_->is_dictionary_map()) {
+  } else if (!holder_->HasFastProperties()) {
     result = holder_->property_dictionary()->ValueAt(number_);
   } else if (property_details_.type() == v8::internal::DATA) {
     Handle<JSObject> holder = GetHolder<JSObject>();
-    FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
+    FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
     return JSObject::FastPropertyAt(holder, property_details_.representation(),
                                     field_index);
   } else {
-    result = holder_map_->instance_descriptors()->GetValue(number_);
+    result = holder_->map()->instance_descriptors()->GetValue(number_);
   }
   return handle(result, isolate_);
 }
@@ -450,7 +496,7 @@
 
 int LookupIterator::GetAccessorIndex() const {
   DCHECK(has_property_);
-  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK(holder_->HasFastProperties());
   DCHECK_EQ(v8::internal::ACCESSOR_CONSTANT, property_details_.type());
   return descriptor_number();
 }
@@ -458,7 +504,7 @@
 
 int LookupIterator::GetConstantIndex() const {
   DCHECK(has_property_);
-  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK(holder_->HasFastProperties());
   DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
   DCHECK(!IsElement());
   return descriptor_number();
@@ -467,22 +513,22 @@
 
 FieldIndex LookupIterator::GetFieldIndex() const {
   DCHECK(has_property_);
-  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK(holder_->HasFastProperties());
   DCHECK_EQ(v8::internal::DATA, property_details_.type());
   DCHECK(!IsElement());
+  Map* holder_map = holder_->map();
   int index =
-      holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
+      holder_map->instance_descriptors()->GetFieldIndex(descriptor_number());
   bool is_double = representation().IsDouble();
-  return FieldIndex::ForPropertyIndex(*holder_map_, index, is_double);
+  return FieldIndex::ForPropertyIndex(holder_map, index, is_double);
 }
 
-
-Handle<HeapType> LookupIterator::GetFieldType() const {
+Handle<FieldType> LookupIterator::GetFieldType() const {
   DCHECK(has_property_);
-  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK(holder_->HasFastProperties());
   DCHECK_EQ(v8::internal::DATA, property_details_.type());
   return handle(
-      holder_map_->instance_descriptors()->GetFieldType(descriptor_number()),
+      holder_->map()->instance_descriptors()->GetFieldType(descriptor_number()),
       isolate_);
 }
 
@@ -516,53 +562,26 @@
   if (IsElement()) {
     Handle<JSObject> object = Handle<JSObject>::cast(holder);
     ElementsAccessor* accessor = object->GetElementsAccessor();
-    accessor->Set(object->elements(), number_, *value);
+    accessor->Set(object, number_, *value);
+  } else if (holder->HasFastProperties()) {
+    if (property_details_.type() == v8::internal::DATA) {
+      JSObject::cast(*holder)->WriteToField(descriptor_number(),
+                                            property_details_, *value);
+    } else {
+      DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+    }
   } else if (holder->IsJSGlobalObject()) {
     Handle<GlobalDictionary> property_dictionary =
         handle(JSObject::cast(*holder)->global_dictionary());
     PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
                              property_details_);
-  } else if (holder_map_->is_dictionary_map()) {
+  } else {
     NameDictionary* property_dictionary = holder->property_dictionary();
     property_dictionary->ValueAtPut(dictionary_entry(), *value);
-  } else if (property_details_.type() == v8::internal::DATA) {
-    JSObject::cast(*holder)->WriteToField(descriptor_number(), *value);
-  } else {
-    DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
   }
 }
 
 
-bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
-  DCHECK(exotic_index_state_ != ExoticIndexState::kNotExotic);
-  if (exotic_index_state_ == ExoticIndexState::kExotic) return true;
-  if (!InternalHolderIsReceiverOrHiddenPrototype()) {
-    exotic_index_state_ = ExoticIndexState::kNotExotic;
-    return false;
-  }
-  DCHECK(exotic_index_state_ == ExoticIndexState::kUninitialized);
-  bool result = false;
-  // Compute and cache result.
-  if (IsElement()) {
-    result = index_ >= JSTypedArray::cast(holder)->length_value();
-  } else if (name()->IsString()) {
-    Handle<String> name_string = Handle<String>::cast(name());
-    if (name_string->length() != 0) {
-      result = IsSpecialIndex(isolate_->unicode_cache(), *name_string);
-    }
-  }
-  exotic_index_state_ =
-      result ? ExoticIndexState::kExotic : ExoticIndexState::kNotExotic;
-  return result;
-}
-
-
-void LookupIterator::InternalizeName() {
-  if (name_->IsUniqueName()) return;
-  name_ = factory()->InternalizeString(Handle<String>::cast(name_));
-}
-
-
 bool LookupIterator::HasInterceptor(Map* map) const {
   if (IsElement()) return map->has_indexed_interceptor();
   return map->has_named_interceptor();
@@ -591,21 +610,30 @@
   DisallowHeapAllocation no_gc;
   if (!map->prototype()->IsJSReceiver()) return NULL;
 
-  JSReceiver* next = JSReceiver::cast(map->prototype());
-  DCHECK(!next->map()->IsJSGlobalObjectMap() ||
-         next->map()->is_hidden_prototype());
+  DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
 
   if (!check_prototype_chain() &&
-      !(check_hidden() && next->map()->is_hidden_prototype()) &&
+      !(check_hidden() && map->has_hidden_prototype()) &&
       // Always look up behind the JSGlobalProxy into the JSGlobalObject, even
       // when not checking other hidden prototypes.
       !map->IsJSGlobalProxyMap()) {
     return NULL;
   }
 
-  return next;
+  return JSReceiver::cast(map->prototype());
 }
 
+LookupIterator::State LookupIterator::NotFound(JSReceiver* const holder) const {
+  DCHECK(!IsElement());
+  if (!holder->IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+
+  Handle<String> name_string = Handle<String>::cast(name_);
+  if (name_string->length() == 0) return NOT_FOUND;
+
+  return IsSpecialIndex(isolate_->unicode_cache(), *name_string)
+             ? INTEGER_INDEXED_EXOTIC
+             : NOT_FOUND;
+}
 
 LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
                                                      JSReceiver* const holder) {
@@ -617,51 +645,32 @@
   switch (state_) {
     case NOT_FOUND:
       if (map->IsJSProxyMap()) {
-        // Do not leak private property names.
         if (IsElement() || !name_->IsPrivate()) return JSPROXY;
       }
-      if (map->is_access_check_needed() &&
-          (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
-        return ACCESS_CHECK;
+      if (map->is_access_check_needed()) {
+        if (IsElement() || !name_->IsPrivate()) return ACCESS_CHECK;
       }
     // Fall through.
     case ACCESS_CHECK:
-      if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
-          holder->IsJSTypedArray() && IsIntegerIndexedExotic(holder)) {
-        return INTEGER_INDEXED_EXOTIC;
-      }
       if (check_interceptor() && HasInterceptor(map) &&
           !SkipInterceptor(JSObject::cast(holder))) {
-        // Do not leak private property names.
-        if (!name_.is_null() && name_->IsPrivate()) return NOT_FOUND;
-        return INTERCEPTOR;
+        if (IsElement() || !name_->IsPrivate()) return INTERCEPTOR;
       }
     // Fall through.
     case INTERCEPTOR:
       if (IsElement()) {
-        // TODO(verwaest): Optimize.
-        if (holder->IsStringObjectWithCharacterAt(index_)) {
-          PropertyAttributes attributes =
-              static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-          property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
-                                              PropertyCellType::kNoCell);
-        } else {
-          JSObject* js_object = JSObject::cast(holder);
-          if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
-            return NOT_FOUND;
-          }
-
-          ElementsAccessor* accessor = js_object->GetElementsAccessor();
-          FixedArrayBase* backing_store = js_object->elements();
-          number_ =
-              accessor->GetEntryForIndex(js_object, backing_store, index_);
-          if (number_ == kMaxUInt32) return NOT_FOUND;
-          property_details_ = accessor->GetDetails(backing_store, number_);
+        JSObject* js_object = JSObject::cast(holder);
+        ElementsAccessor* accessor = js_object->GetElementsAccessor();
+        FixedArrayBase* backing_store = js_object->elements();
+        number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+        if (number_ == kMaxUInt32) {
+          return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
         }
+        property_details_ = accessor->GetDetails(js_object, number_);
       } else if (!map->is_dictionary_map()) {
         DescriptorArray* descriptors = map->instance_descriptors();
-        int number = descriptors->SearchWithCache(*name_, map);
-        if (number == DescriptorArray::kNotFound) return NOT_FOUND;
+        int number = descriptors->SearchWithCache(isolate_, *name_, map);
+        if (number == DescriptorArray::kNotFound) return NotFound(holder);
         number_ = static_cast<uint32_t>(number);
         property_details_ = descriptors->GetDetails(number_);
       } else if (map->IsJSGlobalObjectMap()) {
@@ -676,7 +685,7 @@
       } else {
         NameDictionary* dict = holder->property_dictionary();
         int number = dict->FindEntry(name_);
-        if (number == NameDictionary::kNotFound) return NOT_FOUND;
+        if (number == NameDictionary::kNotFound) return NotFound(holder);
         number_ = static_cast<uint32_t>(number);
         property_details_ = dict->DetailsAt(number_);
       }
diff --git a/src/lookup.h b/src/lookup.h
index 7d68956..0c298d9 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -48,16 +48,14 @@
                  Configuration configuration = DEFAULT)
       : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        exotic_index_state_(ExoticIndexState::kUninitialized),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(name->GetIsolate()),
-        name_(Name::Flatten(name)),
+        name_(isolate_->factory()->InternalizeName(name)),
         // kMaxUInt32 isn't a valid index.
         index_(kMaxUInt32),
         receiver_(receiver),
         holder_(GetRoot(isolate_, receiver)),
-        holder_map_(holder_->map(), isolate_),
         initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
 #ifdef DEBUG
@@ -72,16 +70,14 @@
                  Configuration configuration = DEFAULT)
       : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        exotic_index_state_(ExoticIndexState::kUninitialized),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(name->GetIsolate()),
-        name_(Name::Flatten(name)),
+        name_(isolate_->factory()->InternalizeName(name)),
         // kMaxUInt32 isn't a valid index.
         index_(kMaxUInt32),
         receiver_(receiver),
         holder_(holder),
-        holder_map_(holder_->map(), isolate_),
         initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
 #ifdef DEBUG
@@ -95,7 +91,6 @@
                  Configuration configuration = DEFAULT)
       : configuration_(configuration),
         state_(NOT_FOUND),
-        exotic_index_state_(ExoticIndexState::kUninitialized),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(isolate),
@@ -103,7 +98,6 @@
         index_(index),
         receiver_(receiver),
         holder_(GetRoot(isolate, receiver, index)),
-        holder_map_(holder_->map(), isolate_),
         initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
     // kMaxUInt32 isn't a valid index.
@@ -116,7 +110,6 @@
                  Configuration configuration = DEFAULT)
       : configuration_(configuration),
         state_(NOT_FOUND),
-        exotic_index_state_(ExoticIndexState::kUninitialized),
         interceptor_state_(InterceptorState::kUninitialized),
         property_details_(PropertyDetails::Empty()),
         isolate_(isolate),
@@ -124,7 +117,6 @@
         index_(index),
         receiver_(receiver),
         holder_(holder),
-        holder_map_(holder_->map(), isolate_),
         initial_holder_(holder_),
         number_(DescriptorArray::kNotFound) {
     // kMaxUInt32 isn't a valid index.
@@ -135,27 +127,27 @@
   static LookupIterator PropertyOrElement(
       Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
       Configuration configuration = DEFAULT) {
-    name = Name::Flatten(name);
     uint32_t index;
-    LookupIterator it =
-        name->AsArrayIndex(&index)
-            ? LookupIterator(isolate, receiver, index, configuration)
-            : LookupIterator(receiver, name, configuration);
-    it.name_ = name;
-    return it;
+    if (name->AsArrayIndex(&index)) {
+      LookupIterator it =
+          LookupIterator(isolate, receiver, index, configuration);
+      it.name_ = name;
+      return it;
+    }
+    return LookupIterator(receiver, name, configuration);
   }
 
   static LookupIterator PropertyOrElement(
       Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
       Handle<JSReceiver> holder, Configuration configuration = DEFAULT) {
-    name = Name::Flatten(name);
     uint32_t index;
-    LookupIterator it =
-        name->AsArrayIndex(&index)
-            ? LookupIterator(isolate, receiver, index, holder, configuration)
-            : LookupIterator(receiver, name, holder, configuration);
-    it.name_ = name;
-    return it;
+    if (name->AsArrayIndex(&index)) {
+      LookupIterator it =
+          LookupIterator(isolate, receiver, index, holder, configuration);
+      it.name_ = name;
+      return it;
+    }
+    return LookupIterator(receiver, name, holder, configuration);
   }
 
   static LookupIterator PropertyOrElement(
@@ -193,7 +185,7 @@
   Factory* factory() const { return isolate_->factory(); }
   Handle<Object> GetReceiver() const { return receiver_; }
   Handle<JSObject> GetStoreTarget() const;
-  bool is_dictionary_holder() const { return holder_map_->is_dictionary_map(); }
+  bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
   Handle<Map> transition_map() const {
     DCHECK_EQ(TRANSITION, state_);
     return Handle<Map>::cast(transition_);
@@ -214,17 +206,23 @@
   bool HasAccess() const;
 
   /* PROPERTY */
+  bool ExtendingNonExtensible(Handle<JSObject> receiver) {
+    DCHECK(receiver.is_identical_to(GetStoreTarget()));
+    return !receiver->map()->is_extensible() &&
+           (IsElement() || !name_->IsPrivate());
+  }
   void PrepareForDataProperty(Handle<Object> value);
-  void PrepareTransitionToDataProperty(Handle<Object> value,
+  void PrepareTransitionToDataProperty(Handle<JSObject> receiver,
+                                       Handle<Object> value,
                                        PropertyAttributes attributes,
                                        Object::StoreFromKeyed store_mode);
   bool IsCacheableTransition() {
-    if (state_ != TRANSITION) return false;
+    DCHECK_EQ(TRANSITION, state_);
     return transition_->IsPropertyCell() ||
            (!transition_map()->is_dictionary_map() &&
             transition_map()->GetBackPointer()->IsMap());
   }
-  void ApplyTransitionToDataProperty();
+  void ApplyTransitionToDataProperty(Handle<JSObject> receiver);
   void ReconfigureDataProperty(Handle<Object> value,
                                PropertyAttributes attributes);
   void Delete();
@@ -237,13 +235,17 @@
     DCHECK(has_property_);
     return property_details_;
   }
+  PropertyAttributes property_attributes() const {
+    return property_details().attributes();
+  }
   bool IsConfigurable() const { return property_details().IsConfigurable(); }
   bool IsReadOnly() const { return property_details().IsReadOnly(); }
+  bool IsEnumerable() const { return property_details().IsEnumerable(); }
   Representation representation() const {
     return property_details().representation();
   }
   FieldIndex GetFieldIndex() const;
-  Handle<HeapType> GetFieldType() const;
+  Handle<FieldType> GetFieldType() const;
   int GetAccessorIndex() const;
   int GetConstantIndex() const;
   Handle<PropertyCell> GetPropertyCell() const;
@@ -254,8 +256,7 @@
   }
   Handle<Object> GetDataValue() const;
   void WriteDataValue(Handle<Object> value);
-  void InternalizeName();
-  void ReloadHolderMap();
+  void UpdateProtector();
 
  private:
   enum class InterceptorState {
@@ -277,7 +278,6 @@
   void ReloadPropertyInformation();
   inline bool SkipInterceptor(JSObject* holder);
   bool HasInterceptor(Map* map) const;
-  bool InternalHolderIsReceiverOrHiddenPrototype() const;
   inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
     if (IsElement()) return holder->GetIndexedInterceptor();
     return holder->GetNamedInterceptor();
@@ -288,13 +288,15 @@
     return (configuration_ & kInterceptor) != 0;
   }
   int descriptor_number() const {
+    DCHECK(!IsElement());
     DCHECK(has_property_);
-    DCHECK(!holder_map_->is_dictionary_map());
+    DCHECK(holder_->HasFastProperties());
     return number_;
   }
   int dictionary_entry() const {
+    DCHECK(!IsElement());
     DCHECK(has_property_);
-    DCHECK(holder_map_->is_dictionary_map());
+    DCHECK(!holder_->HasFastProperties());
     return number_;
   }
 
@@ -317,15 +319,15 @@
     return GetRootForNonJSReceiver(isolate, receiver, index);
   }
 
-  enum class ExoticIndexState { kUninitialized, kNotExotic, kExotic };
-  inline bool IsIntegerIndexedExotic(JSReceiver* holder);
+  State NotFound(JSReceiver* const holder) const;
+
+  bool HolderIsInContextIndex(uint32_t index) const;
 
   // If configuration_ becomes mutable, update
   // HolderIsReceiverOrHiddenPrototype.
   const Configuration configuration_;
   State state_;
   bool has_property_;
-  ExoticIndexState exotic_index_state_;
   InterceptorState interceptor_state_;
   PropertyDetails property_details_;
   Isolate* const isolate_;
@@ -334,7 +336,6 @@
   Handle<Object> transition_;
   const Handle<Object> receiver_;
   Handle<JSReceiver> holder_;
-  Handle<Map> holder_map_;
   const Handle<JSReceiver> initial_holder_;
   uint32_t number_;
 };
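
The PropertyOrElement factories above hinge on Name::AsArrayIndex. For reference, a self-contained sketch (not V8's implementation) of the JavaScript array-index rule it encodes, namely a canonical decimal integer in [0, 2**32 - 2]:

#include <cstdint>
#include <string>

// A name is an element index iff it is the canonical decimal form of an
// integer in [0, 2**32 - 2]; everything else stays a named property.
bool AsArrayIndex(const std::string& name, uint32_t* index) {
  if (name.empty()) return false;
  if (name.size() > 1 && name[0] == '0') return false;  // no leading zeros
  uint64_t value = 0;
  for (char c : name) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
    if (value > 0xFFFFFFFEull) return false;  // must stay below 2**32 - 1
  }
  *index = static_cast<uint32_t>(value);
  return true;
}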
diff --git a/src/machine-type.cc b/src/machine-type.cc
index 1fb886c..fcc3e97 100644
--- a/src/machine-type.cc
+++ b/src/machine-type.cc
@@ -26,6 +26,8 @@
       return os << "kRepFloat32";
     case MachineRepresentation::kFloat64:
       return os << "kRepFloat64";
+    case MachineRepresentation::kSimd128:
+      return os << "kRepSimd128";
     case MachineRepresentation::kTagged:
       return os << "kRepTagged";
   }
diff --git a/src/machine-type.h b/src/machine-type.h
index 97f6ae3..1085657 100644
--- a/src/machine-type.h
+++ b/src/machine-type.h
@@ -24,6 +24,7 @@
   kWord64,
   kFloat32,
   kFloat64,
+  kSimd128,
   kTagged
 };
 
@@ -84,6 +85,9 @@
     return MachineType(MachineRepresentation::kFloat64,
                        MachineSemantic::kNumber);
   }
+  static MachineType Simd128() {
+    return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+  }
   static MachineType Int8() {
     return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
   }
@@ -143,6 +147,9 @@
   static MachineType RepFloat64() {
     return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
   }
+  static MachineType RepSimd128() {
+    return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+  }
   static MachineType RepTagged() {
     return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
   }
@@ -187,6 +194,8 @@
     case MachineRepresentation::kWord64:
     case MachineRepresentation::kFloat64:
       return 3;
+    case MachineRepresentation::kSimd128:
+      return 4;
     case MachineRepresentation::kTagged:
       return kPointerSizeLog2;
     default:
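
The new kSimd128 entries are internally consistent: a 128-bit value occupies 16 bytes, so ElementSizeLog2Of returns 4 for it. A one-line sanity check:

// 1 << 4 == 16 bytes == 128 bits, matching the case added above.
static_assert((1 << 4) * 8 == 128, "kSimd128 is 16 bytes / 128 bits");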
diff --git a/src/messages.cc b/src/messages.cc
index 23deb1a..072ac1d 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -223,9 +223,12 @@
 
 
 Handle<Object> CallSite::GetMethodName() {
-  MaybeHandle<JSReceiver> maybe = Object::ToObject(isolate_, receiver_);
-  Handle<JSReceiver> receiver;
-  if (!maybe.ToHandle(&receiver) || !receiver->IsJSObject()) {
+  if (receiver_->IsNull() || receiver_->IsUndefined()) {
+    return isolate_->factory()->null_value();
+  }
+  Handle<JSReceiver> receiver =
+      Object::ToObject(isolate_, receiver_).ToHandleChecked();
+  if (!receiver->IsJSObject()) {
     return isolate_->factory()->null_value();
   }
 
@@ -247,7 +250,7 @@
     if (!current->IsJSObject()) break;
     Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
     if (current_obj->IsAccessCheckNeeded()) break;
-    Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj, false);
+    Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj);
     for (int i = 0; i < keys->length(); i++) {
       HandleScope inner_scope(isolate_);
       if (!keys->get(i)->IsName()) continue;
diff --git a/src/messages.h b/src/messages.h
index 8cd60b1..c71e11b 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -71,7 +71,6 @@
   int32_t pos_;
 };
 
-
 #define MESSAGE_TEMPLATES(T)                                                   \
   /* Error */                                                                  \
   T(None, "")                                                                  \
@@ -95,6 +94,7 @@
   T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements")         \
   T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.")    \
   T(CalledNonCallable, "% is not a function")                                  \
+  T(CalledNonCallableInstanceOf, "right-hand side is not a function")          \
   T(CalledOnNonObject, "% called on non-object")                               \
   T(CalledOnNullOrUndefined, "% called on null or undefined")                  \
   T(CallSiteExpectsFunction,                                                   \
@@ -295,6 +295,7 @@
   T(RestrictedFunctionProperties,                                              \
     "'caller' and 'arguments' are restricted function properties and cannot "  \
     "be accessed in this context.")                                            \
+  T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
   T(StaticPrototype, "Classes may not have static property named prototype")   \
   T(StrictCannotAssign, "Cannot assign to read only '%' in strict mode")       \
   T(StrictDeleteProperty, "Cannot delete property '%' of %")                   \
@@ -316,10 +317,13 @@
     "to be non-writable is deprecated")                                        \
   T(StrongSetProto,                                                            \
     "On strong object %, redefining the internal prototype is deprecated")     \
+  T(SymbolIteratorInvalid,                                                     \
+    "Result of the Symbol.iterator method is not an object")                   \
   T(SymbolKeyFor, "% is not a symbol")                                         \
   T(SymbolToNumber, "Cannot convert a Symbol value to a number")               \
   T(SymbolToString, "Cannot convert a Symbol value to a string")               \
   T(SimdToNumber, "Cannot convert a SIMD value to a number")                   \
+  T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.")     \
   T(UndefinedOrNullToObject, "Cannot convert undefined or null to object")     \
   T(ValueAndAccessor,                                                          \
     "Invalid property descriptor. Cannot both specify accessors and a value "  \
@@ -332,8 +336,6 @@
   T(StrongSuperCallMissing,                                                    \
     "In strong mode, invoking the super constructor in a subclass is "         \
     "required")                                                                \
-  T(StrongUnboundGlobal,                                                       \
-    "In strong mode, using an undeclared global variable '%' is not allowed")  \
   T(UnsupportedSuper, "Unsupported reference to 'super'")                      \
   /* RangeError */                                                             \
   T(DateRange, "Provided date is not in valid range.")                         \
@@ -384,12 +386,10 @@
   T(DuplicateExport, "Duplicate export of '%'")                                \
   T(DuplicateProto,                                                            \
     "Duplicate __proto__ fields are not allowed in object literals")           \
-  T(ForInLoopInitializer,                                                      \
-    "for-in loop variable declaration may not have an initializer.")           \
+  T(ForInOfLoopInitializer,                                                    \
+    "% loop variable declaration may not have an initializer.")                \
   T(ForInOfLoopMultiBindings,                                                  \
     "Invalid left-hand side in % loop: Must have a single binding.")           \
-  T(ForOfLoopInitializer,                                                      \
-    "for-of loop variable declaration may not have an initializer.")           \
   T(IllegalAccess, "Illegal access")                                           \
   T(IllegalBreak, "Illegal break statement")                                   \
   T(IllegalContinue, "Illegal continue statement")                             \
@@ -397,6 +397,7 @@
     "Illegal '%' directive in function with non-simple parameter list")        \
   T(IllegalReturn, "Illegal return statement")                                 \
   T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
+  T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters")     \
   T(InvalidLhsInAssignment, "Invalid left-hand side in assignment")            \
   T(InvalidCoverInitializedName, "Invalid shorthand property initializer")     \
   T(InvalidDestructuringTarget, "Invalid destructuring assignment target")     \
@@ -406,6 +407,10 @@
   T(InvalidLhsInPrefixOp,                                                      \
     "Invalid left-hand side expression in prefix operation")                   \
   T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'")    \
+  T(JsonParseUnexpectedEOS, "Unexpected end of JSON input")                    \
+  T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %")      \
+  T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
+  T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
   T(LabelRedeclaration, "Label '%' has already been declared")                 \
   T(MalformedArrowFunParamList, "Malformed arrow function parameter list")     \
   T(MalformedRegExp, "Invalid regular expression: /%/: %")                     \
@@ -417,6 +422,8 @@
   T(NoCatchOrFinally, "Missing catch or finally after try")                    \
   T(NotIsvar, "builtin %%IS_VAR: not a variable")                              \
   T(ParamAfterRest, "Rest parameter must be last formal parameter")            \
+  T(InvalidRestParameter,                                                      \
+    "Rest parameter must be an identifier or destructuring pattern")           \
   T(PushPastSafeLength,                                                        \
     "Pushing % elements on an array-like of length % "                         \
     "is disallowed, as the total surpasses 2**53-1")                           \
@@ -480,8 +487,6 @@
     "with 'break', 'continue', 'return' or 'throw'")                           \
   T(StrongUndefined,                                                           \
     "In strong mode, binding or assigning to 'undefined' is deprecated")       \
-  T(StrongUseBeforeDeclaration,                                                \
-    "In strong mode, declaring variable '%' before its use is required")       \
   T(StrongVar,                                                                 \
     "In strong mode, 'var' is deprecated, use 'let' or 'const' instead")       \
   T(TemplateOctalLiteral,                                                      \
@@ -495,6 +500,8 @@
   T(TypedArrayTooShort,                                                        \
     "Derived TypedArray constructor created an array which was too small")     \
   T(UnexpectedEOS, "Unexpected end of input")                                  \
+  T(UnexpectedFunctionSent,                                                    \
+    "function.sent expression is not allowed outside a generator")             \
   T(UnexpectedReserved, "Unexpected reserved word")                            \
   T(UnexpectedStrictReserved, "Unexpected strict mode reserved word")          \
   T(UnexpectedSuper, "'super' keyword unexpected here")                        \
@@ -510,6 +517,8 @@
   T(UnterminatedRegExp, "Invalid regular expression: missing /")               \
   T(UnterminatedTemplate, "Unterminated template literal")                     \
   T(UnterminatedTemplateExpr, "Missing } in template expression")              \
+  T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance")           \
+  T(NonObjectInInstanceOfCheck, "Expecting an object in instanceof check")     \
   /* EvalError */                                                              \
   T(CodeGenFromStrings, "%")                                                   \
   /* URIError */                                                               \
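
Each '%' in the template strings above is a positional placeholder; JsonParseUnexpectedToken, for instance, consumes two arguments. A hedged sketch of that substitution convention (not V8's actual formatter):

#include <string>
#include <vector>

// Replace each '%' with the next argument, leaving any surplus '%'
// placeholders intact once the arguments run out.
std::string FormatMessage(const std::string& tmpl,
                          const std::vector<std::string>& args) {
  std::string out;
  size_t next = 0;
  for (char c : tmpl) {
    if (c == '%' && next < args.size()) {
      out += args[next++];
    } else {
      out += c;
    }
  }
  return out;
}

// FormatMessage("Unexpected token % in JSON at position %", {"x", "10"})
// yields "Unexpected token x in JSON at position 10".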
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 27ec8e5..5e27f45 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -214,8 +214,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -284,10 +284,8 @@
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -351,25 +349,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  Instr instr0 = Assembler::instr_at(pc_);
-  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
-  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
-                         (instr1 & kOpcodeMask) == ORI &&
-                         ((instr2 & kOpcodeMask) == JAL ||
-                          ((instr2 & kOpcodeMask) == SPECIAL &&
-                           (instr2 & kFunctionFieldMask) == JALR)));
-  return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  Instr current_instr = Assembler::instr_at(pc_);
-  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index a8b6cc7..e50a239 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -285,10 +285,7 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  if (IsPrevInstrCompactBranch()) {
-    nop();
-    ClearCompactBranchState();
-  }
+  EmitForbiddenSlotInstruction();
   DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -302,10 +299,7 @@
 
 void Assembler::Align(int m) {
   DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
-  if (IsPrevInstrCompactBranch()) {
-    nop();
-    ClearCompactBranchState();
-  }
+  EmitForbiddenSlotInstruction();
   while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
@@ -2092,20 +2086,7 @@
   // Workaround for non-8-byte alignment of HeapNumber: convert a 64-bit
   // load into two 32-bit loads.
   DCHECK(!src.rm().is(at));
-  if (IsFp64Mode()) {
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(LWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      GenInstrImmediate(LW, src.rm(), at,
-                        src.offset_ + Register::kExponentOffset);
-      mthc1(at, fd);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
-      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
-      mthc1(at, fd);
-    }
-  } else {  // fp32 mode.
+  if (IsFp32Mode()) {  // fp32 mode.
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(LWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
@@ -2120,6 +2101,22 @@
       nextfpreg.setcode(fd.code() + 1);
       GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
     }
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6.
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
+      GenInstrImmediate(LWC1, src.rm(), fd,
+                        src.offset_ + Register::kMantissaOffset);
+      GenInstrImmediate(LW, src.rm(), at,
+                        src.offset_ + Register::kExponentOffset);
+      mthc1(at, fd);
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(src);
+      GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
+      GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+      mthc1(at, fd);
+    }
   }
 }
 
@@ -2139,20 +2136,7 @@
   // store into two 32-bit stores.
   DCHECK(!src.rm().is(at));
   DCHECK(!src.rm().is(t8));
-  if (IsFp64Mode()) {
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(SWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      mfhc1(at, fd);
-      GenInstrImmediate(SW, src.rm(), at,
-                        src.offset_ + Register::kExponentOffset);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      LoadRegPlusOffsetToAt(src);
-      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
-      mfhc1(t8, fd);
-      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
-    }
-  } else {  // fp32 mode.
+  if (IsFp32Mode()) {  // fp32 mode.
     if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
       GenInstrImmediate(SWC1, src.rm(), fd,
                         src.offset_ + Register::kMantissaOffset);
@@ -2167,6 +2151,22 @@
       nextfpreg.setcode(fd.code() + 1);
       GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
     }
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6.
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
+      GenInstrImmediate(SWC1, src.rm(), fd,
+                        src.offset_ + Register::kMantissaOffset);
+      mfhc1(at, fd);
+      GenInstrImmediate(SW, src.rm(), at,
+                        src.offset_ + Register::kExponentOffset);
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(src);
+      GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
+      mfhc1(t8, fd);
+      GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+    }
   }
 }
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 0546954..b708ef7 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -304,6 +304,8 @@
 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
+// TODO(mips) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
 
 // -----------------------------------------------------------------------------
 // Machine instruction Operands.
@@ -518,14 +520,11 @@
   // a target is resolved and written.
   static const int kSpecialTargetSize = 0;
 
-  // Number of consecutive instructions used to store 32bit constant.
-  // Before jump-optimizations, this constant was used in
-  // RelocInfo::target_address_address() function to tell serializer address of
-  // the instruction that follows LUI/ORI instruction pair. Now, with new jump
-  // optimization, where jump-through-register instruction that usually
-  // follows LUI/ORI pair is substituted with J/JAL, this constant equals
-  // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
-  static const int kInstructionsFor32BitConstant = 3;
+  // Number of consecutive instructions used to store a 32-bit constant. This
+  // constant is used in the RelocInfo::target_address_address() function to
+  // tell the serializer the address of the instruction that follows the
+  // LUI/ORI instruction pair.
+  static const int kInstructionsFor32BitConstant = 2;
 
   // Distance between the instruction referring to the address of the call
   // target and the return address.
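
For readers checking the new constant: the two instructions are the classic LUI/ORI pair that builds a 32-bit immediate 16 bits at a time. A sketch of the arithmetic (not V8 code):

    #include <cstdint>

    // kInstructionsFor32BitConstant == 2 because a full 32-bit constant is
    // materialized as LUI (upper half) followed by ORI (lower half).
    uint32_t MaterializeConstant(uint32_t imm) {
      uint32_t reg = imm & 0xFFFF0000u;  // lui reg, hi16(imm)
      reg |= imm & 0xFFFFu;              // ori reg, reg, lo16(imm)
      return reg;
    }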
@@ -1035,7 +1034,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
@@ -1206,6 +1205,12 @@
     return block_buffer_growth_;
   }
 
+  void EmitForbiddenSlotInstruction() {
+    if (IsPrevInstrCompactBranch()) {
+      nop();
+    }
+  }
+
   inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
 
  private:
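
The new EmitForbiddenSlotInstruction() helper encodes the MIPSR6 rule that the slot immediately following a compact branch (the "forbidden slot") must not itself contain a branch, so padding such as alignment nops has to be preceded by an explicit nop when the previous instruction was a compact branch. A hedged sketch of the bookkeeping, assuming an assembler that remembers the kind of the last emitted instruction:

    // Illustrative only; models the helper above, not the real Assembler.
    struct SketchAssembler {
      bool prev_instr_compact_branch = false;
      void nop() { prev_instr_compact_branch = false; /* emit nop */ }
      void EmitForbiddenSlotInstruction() {
        // Fill the forbidden slot with a nop so that whatever is emitted
        // next (e.g. alignment padding) cannot land in it.
        if (prev_instr_compact_branch) nop();
      }
    };

This is why the Align() change earlier in this patch calls the helper before its nop-padding loop.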
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index f6c1dfb..09f4d59 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -142,6 +142,107 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments
+  //  -- ra                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in a1 and the double value in f0.
+  __ LoadRoot(a1, root_index);
+  __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+  __ mov(a3, a0);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ Subu(a0, a0, Operand(1));
+    __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+    // Load the next parameter tagged value into a2.
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
+    __ lw(a2, MemOperand(at));
+
+    // Load the double value of the parameter into f2, converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(a2, &convert_smi);
+    __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(a0);
+      __ SmiTag(a3);
+      __ Push(a0, a1, a3);
+      __ mov(a0, a2);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(a2, v0);
+      __ Pop(a0, a1, a3);
+      {
+        // Restore the double accumulator value (f0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(a1, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+        __ jmp(&done_restore);
+        __ bind(&restore_smi);
+        __ SmiToDoubleFPURegister(a1, f0, t0);
+        __ bind(&done_restore);
+      }
+      __ SmiUntag(a3);
+      __ SmiUntag(a0);
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+    __ jmp(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDoubleFPURegister(a2, f2, t0);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (f0) and the next parameter value on the right hand side (f2).
+    Label compare_equal, compare_nan, compare_swap;
+    __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+    __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+    __ Branch(&loop);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    __ FmoveHigh(t0, reg);
+    __ Branch(&loop, ne, t0, Operand(0x80000000));
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ mov_d(f0, f2);
+    __ mov(a1, a2);
+    __ jmp(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(a1, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ Lsa(sp, sp, a3, kPointerSizeLog2);
+  __ mov(v0, a1);
+  __ DropAndRet(1);
+}
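
The loop above implements the ES6 Math.max/Math.min accumulator semantics. A plain C++ model of the same logic (illustrative, not V8 code; the real builtin also round-trips non-number arguments through ToNumber, which is elided here):

    #include <cmath>
    #include <limits>
    #include <vector>

    double MathMaxMin(bool is_min, const std::vector<double>& args) {
      // Accumulator starts at +Infinity for min, -Infinity for max.
      double acc = is_min ? std::numeric_limits<double>::infinity()
                          : -std::numeric_limits<double>::infinity();
      for (double v : args) {
        if (std::isnan(v) || std::isnan(acc)) {
          acc = std::numeric_limits<double>::quiet_NaN();  // NaN is sticky.
          continue;
        }
        if (v == acc) {
          // Equal compares: disambiguate -0 vs. +0 via the sign bit.
          if (std::signbit(v) != std::signbit(acc)) acc = is_min ? -0.0 : 0.0;
        } else if (is_min ? (v < acc) : (v > acc)) {
          acc = v;
        }
      }
      return acc;
    }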
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                     : number of arguments
@@ -157,8 +258,7 @@
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
   }
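
This is the first of many hunks in this file that fold an sll+addu pair into the Lsa macro. Its effect, written out as a plain function (assumption: Lsa(rd, base, index, sa) computes base + (index << sa), a single lsa instruction on MIPS32R6 and the old two-instruction sequence on earlier cores):

    #include <cstdint>

    // Lsa(rd, base, index, sa):  rd = base + (index << sa)
    // Used here to compute argument addresses such as
    // sp + (argc << kPointerSizeLog2) in a single macro instruction.
    uint32_t Lsa(uint32_t base, uint32_t index, unsigned sa) {
      return base + (index << sa);
    }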
@@ -194,8 +294,7 @@
     Label no_arguments, done;
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
     __ jmp(&done);
@@ -234,8 +333,9 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a0, a1, a3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(a0);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(a0);
   }
   __ Ret(USE_DELAY_SLOT);
@@ -259,8 +359,7 @@
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
   }
@@ -322,8 +421,7 @@
     Label no_arguments, done;
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Subu(a0, a0, Operand(1));
-    __ sll(a0, a0, kPointerSizeLog2);
-    __ Addu(sp, a0, sp);
+    __ Lsa(sp, sp, a0, kPointerSizeLog2);
     __ lw(a0, MemOperand(sp));
     __ Drop(2);
     __ jmp(&done);
@@ -364,33 +462,15 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a0, a1, a3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(a0);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(a0);
   }
   __ Ret(USE_DELAY_SLOT);
   __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));  // In delay slot
 }
 
-
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- a1 : target function (preserved for callee)
-  //  -- a3 : new target (preserved for callee)
-  // -----------------------------------
-
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  // Push function as parameter to the runtime call.
-  __ Push(a1, a3, a1);
-
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ Pop(a1, a3);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -398,8 +478,27 @@
   __ Jump(at);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+    __ CallRuntime(function_id, 1);
+
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+
   __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(at);
 }
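
The rewritten helper also preserves the raw argument count in a0 across the runtime call. It is Smi-tagged before being pushed so that the GC, walking the frame, only ever sees a valid tagged value. The tagging is just a one-bit shift on 32-bit targets (sketch, assuming kSmiTag == 0 and kSmiTagSize == 1):

    #include <cstdint>

    // On 32-bit V8 a Smi stores the integer in the upper 31 bits with a
    // zero tag bit, so tagging/untagging is a shift by one.
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }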
@@ -415,8 +514,7 @@
   __ LoadRoot(t0, Heap::kStackLimitRootIndex);
   __ Branch(&ok, hs, sp, Operand(t0));
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -425,7 +523,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -447,144 +546,18 @@
     __ Push(a2, a0);
 
     if (create_implicit_receiver) {
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ GetObjectType(a3, t1, t0);
-        __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
-
-        // Load the initial map and verify that it is in fact a map.
-        // a3: new target
-        __ lw(a2,
-              FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
-        __ JumpIfSmi(a2, &rt_call);
-        __ GetObjectType(a2, t5, t4);
-        __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
-        __ Branch(&rt_call, ne, a1, Operand(t1));
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // a1: constructor function
-        // a2: initial map
-        __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-        __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
-        // Now allocate the JSObject on the heap.
-        // a1: constructor function
-        // a2: initial map
-        // a3: new target
-        __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
-        __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
-
-        // Allocated the JSObject, now initialize the fields. Map is set to
-        // initial map and properties and elements are set to empty fixed array.
-        // a1: constructor function
-        // a2: initial map
-        // a3: new target
-        // t4: JSObject (not HeapObject tagged - the actual address).
-        // t3: start of next object
-        __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-        __ mov(t5, t4);
-        STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
-        __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-        STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
-        __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-        STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
-        __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-        STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-        __ Addu(t5, t5, Operand(3 * kPointerSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ Addu(t4, t4, Operand(kHeapObjectTag));
-
-        // Fill all the in-object properties with appropriate filler.
-        // t4: JSObject (tagged)
-        // t5: First in-object property of JSObject (not tagged)
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
-          // Check if slack tracking is enabled.
-          __ lw(t0, bit_field3);
-          __ DecodeField<Map::ConstructionCounter>(t2, t0);
-          // t2: slack tracking counter
-          __ Branch(&no_inobject_slack_tracking, lt, t2,
-                    Operand(Map::kSlackTrackingCounterEnd));
-          // Decrease generous allocation count.
-          __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
-          __ sw(t0, bit_field3);
-
-          // Allocate object with a slack.
-          __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-          __ sll(a0, a0, kPointerSizeLog2);
-          __ subu(a0, t3, a0);
-          // a0: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
-                      Operand(a0));
-          }
-          __ InitializeFieldsWithFiller(t5, a0, t7);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(t5, t3, t7);
-
-          // t2: slack tracking counter value before decreasing.
-          __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(a1, a3, t4, a2);
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ Pop(a1, a3, t4);
-
-          // Continue with JSObject being successfully allocated.
-          // a1: constructor function
-          // a3: new target
-          // t4: JSObject
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(t5, t3, t7);
-
-        // Continue with JSObject being successfully allocated.
-        // a1: constructor function
-        // a3: new target
-        // t4: JSObject
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // a1: constructor function
-      // a3: new target
-      __ bind(&rt_call);
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
-      __ Push(a1, a3, a1, a3);  // constructor function, new target
-      __ CallRuntime(Runtime::kNewObject);
+      // Allocate the new receiver object.
+      __ Push(a1, a3);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
       __ mov(t4, v0);
       __ Pop(a1, a3);
 
-      // Receiver for constructor call allocated.
-      // a1: constructor function
-      // a3: new target
-      // t4: JSObject
-      __ bind(&allocated);
+      // ----------- S t a t e -------------
+      //  -- a1: constructor function
+      //  -- a3: new target
+      //  -- t4: newly allocated object
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
       __ lw(a0, MemOperand(sp));
@@ -617,8 +590,7 @@
     __ SmiTag(t4, a0);
     __ jmp(&entry);
     __ bind(&loop);
-    __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(t0, a2, Operand(t0));
+    __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
     __ lw(t1, MemOperand(t0));
     __ push(t1);
     __ bind(&entry);
@@ -684,8 +656,20 @@
     // Leave construct frame.
   }
 
-  __ sll(t0, a1, kPointerSizeLog2 - 1);
-  __ Addu(sp, sp, t0);
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(v0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
+  __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, kPointerSize);
   if (create_implicit_receiver) {
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
@@ -695,17 +679,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -787,8 +777,7 @@
     // a3: argc
     // s0: argv, i.e. points to first arg
     Label loop, entry;
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t2, s0, t0);
+    __ Lsa(t2, s0, a3, kPointerSizeLog2);
     __ b(&entry);
     __ nop();   // Branch delay slot nop.
     // t2 points past last arg.
@@ -851,10 +840,8 @@
 //   o sp: stack pointer
 //   o ra: return address
 //
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -863,16 +850,19 @@
 
   __ Push(ra, fp, cp, a1);
   __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-  __ Push(a3);
-
-  // Push zero for bytecode array offset.
-  __ Push(zero_reg);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
   __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(a0));
+  __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&load_debug_bytecode_array, ne, debug_info,
+            Operand(DebugInfo::uninitialized()));
   __ lw(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -884,6 +874,9 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -914,44 +907,38 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ LoadRoot(at, Heap::kStackLimitRootIndex);
-    __ Branch(&ok, hs, sp, Operand(at));
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load bytecode offset and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
   __ Addu(kInterpreterRegisterFileRegister, fp,
           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ li(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
   __ Addu(a0, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a0, MemOperand(a0));
-  __ sll(at, a0, kPointerSizeLog2);
-  __ Addu(at, kInterpreterDispatchTableRegister, at);
+  __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
   __ lw(at, MemOperand(at));
  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
   // and header removal.
   __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(at);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  __ lw(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ Branch(&bytecode_array_loaded);
 }
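
With this change the dispatch table lives at an isolate-level external address instead of a heap FixedArray, so no untagging of the table pointer is needed. The lookup the trampoline performs boils down to the following (illustrative sketch; Handler and the parameter names are placeholders):

    #include <cstdint>

    using Handler = void (*)();

    // lbu a0, bytecode[offset]; Lsa/lw into the table; call the handler.
    void Dispatch(const uint8_t* bytecode_array, int offset,
                  Handler* dispatch_table) {
      uint8_t opcode = bytecode_array[offset];
      Handler handler = dispatch_table[opcode];
      handler();
    }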
 
 
@@ -976,7 +963,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1001,7 +989,9 @@
   __ Branch(&loop_header, gt, a2, Operand(a3));
 
   // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -1036,47 +1026,24 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(a1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use this for interpreter deopts).
-  __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ Addu(kInterpreterRegisterFileRegister, fp,
           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ lw(kContextRegister,
         MemOperand(kInterpreterRegisterFileRegister,
                    InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ lw(a1,
-        MemOperand(kInterpreterRegisterFileRegister,
-                   InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(kInterpreterBytecodeArrayRegister,
-        FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+  __ lw(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1099,14 +1066,36 @@
   __ Addu(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a1, MemOperand(a1));
-  __ sll(a1, a1, kPointerSizeLog2);
-  __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+  __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
   __ lw(a1, MemOperand(a1));
   __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a1);
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use it for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -1121,22 +1110,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1356,13 +1353,11 @@
 
   // Load the next prototype and iterate.
   __ bind(&next_prototype);
-  __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
-  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
-  __ DecodeField<Map::IsHiddenPrototype>(scratch);
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
   __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+  __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
 
   __ Branch(&prototype_loop_start);
 
@@ -1387,8 +1382,7 @@
 
   // Do the compatible receiver check.
   Label receiver_check_failed;
-  __ sll(at, a0, kPointerSizeLog2);
-  __ Addu(t8, sp, at);
+  __ Lsa(t8, sp, a0, kPointerSizeLog2);
   __ lw(t0, MemOperand(t8));
   CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
 
@@ -1522,6 +1516,7 @@
     Register scratch = t0;
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
     __ mov(a3, a2);
+    // Lsa() cannot be used here because the scratch value is used later.
     __ sll(scratch, a0, kPointerSizeLog2);
     __ Addu(a0, sp, Operand(scratch));
     __ lw(a1, MemOperand(a0));  // receiver
@@ -1592,8 +1587,7 @@
 
   // 2. Get the function to call (passed as receiver) from the stack.
   // a0: actual number of arguments
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(at, sp, at);
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
   __ lw(a1, MemOperand(at));
 
   // 3. Shift arguments and return address one slot down on the stack
@@ -1604,8 +1598,7 @@
   {
     Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(a2, sp, at);
+    __ Lsa(a2, sp, a0, kPointerSizeLog2);
 
     __ bind(&loop);
     __ lw(at, MemOperand(a2, -kPointerSize));
@@ -1705,6 +1698,7 @@
     Register scratch = t0;
     __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
     __ mov(a2, a1);
+    // Lsa() cannot be used here because the scratch value is used later.
     __ sll(scratch, a0, kPointerSizeLog2);
     __ Addu(a0, sp, Operand(scratch));
     __ sw(a2, MemOperand(a0));  // receiver
@@ -1806,8 +1800,7 @@
                              kPointerSize)));
   __ mov(sp, fp);
   __ MultiPop(fp.bit() | ra.bit());
-  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(sp, sp, t0);
+  __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
   // Adjust for the receiver.
   __ Addu(sp, sp, Operand(kPointerSize));
 }
@@ -1859,9 +1852,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ lw(a2,
-          FieldMemOperand(a0, JSObject::kHeaderSize +
-                                  Heap::kArgumentsLengthIndex * kPointerSize));
+    __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
     __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
     __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
     __ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1915,8 +1906,7 @@
     Label done, loop;
     __ bind(&loop);
     __ Branch(&done, eq, t0, Operand(a2));
-    __ sll(at, t0, kPointerSizeLog2);
-    __ Addu(at, a0, at);
+    __ Lsa(at, a0, t0, kPointerSizeLog2);
     __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
     __ Push(at);
     __ Addu(t0, t0, Operand(1));
@@ -1936,10 +1926,134 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ li(at, Operand(debug_is_active));
+  __ lb(scratch1, MemOperand(at));
+  __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ lw(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&no_interpreter_frame, ne, scratch3,
+              Operand(Smi::FromInt(StackFrame::STUB)));
+    __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(fp, scratch2);
+  __ lw(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(scratch1,
+        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(scratch1,
+        FieldMemOperand(scratch1,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(scratch1);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of the destination area where we will put the arguments
+  // after we drop the current frame. We add kPointerSize to count the receiver
+  // argument, which is not included in the formal parameter count.
+  Register dst_reg = scratch2;
+  __ Lsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+  __ Addu(dst_reg, dst_reg,
+          Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = scratch1;
+  __ Lsa(src_reg, sp, args_reg, kPointerSizeLog2);
+  // Count receiver argument as well (not included in args_reg).
+  __ Addu(src_reg, src_reg, Operand(kPointerSize));
+
+  if (FLAG_debug_code) {
+    __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy the callee arguments to the caller frame, walking backwards to
+  // avoid corrupting them (the source and destination areas may overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch3;
+  Label loop, entry;
+  __ Branch(&entry);
+  __ bind(&loop);
+  __ Subu(src_reg, src_reg, Operand(kPointerSize));
+  __ Subu(dst_reg, dst_reg, Operand(kPointerSize));
+  __ lw(tmp_reg, MemOperand(src_reg));
+  __ sw(tmp_reg, MemOperand(dst_reg));
+  __ bind(&entry);
+  __ Branch(&loop, ne, sp, Operand(src_reg));
+
+  // Leave current frame.
+  __ mov(sp, dst_reg);
+
+  __ bind(&done);
+}
+}  // namespace
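
The heart of PrepareForTailCall is the backwards word copy that slides the callee's prepared arguments down over the frame being dropped. A minimal C++ model of that loop (a sketch under the stack layout in the comment above; dst lies above src, which is why the copy walks backwards to tolerate overlap):

    #include <cstdint>

    // Copy the words in [src_begin, src_end) to the range ending at dst_end,
    // iterating from the top down because the ranges may overlap.
    void CopyArgsBackwards(uintptr_t* src_begin, uintptr_t* src_end,
                           uintptr_t* dst_end) {
      while (src_end != src_begin) {
        *--dst_end = *--src_end;
      }
    }

In the real code src_begin is sp, and the final destination pointer becomes the new sp.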
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the function to call (checked to be a JSFunction)
@@ -1979,8 +2093,7 @@
       __ LoadGlobalProxy(a3);
     } else {
       Label convert_to_object, convert_receiver;
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(at, sp, at);
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
       __ lw(a3, MemOperand(at));
       __ JumpIfSmi(a3, &convert_to_object);
       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2016,8 +2129,7 @@
       __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
       __ bind(&convert_receiver);
     }
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(at, sp, at);
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
     __ sw(a3, MemOperand(at));
   }
   __ bind(&done_convert);
@@ -2029,6 +2141,10 @@
   //  -- cp : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   __ lw(a2,
         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
   __ sra(a2, a2, kSmiTagSize);  // Un-tag.
@@ -2048,18 +2164,22 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(a1);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   // Patch the receiver to [[BoundThis]].
   {
     __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
-    __ sll(t0, a0, kPointerSizeLog2);
-    __ addu(t0, t0, sp);
+    __ Lsa(t0, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t0));
   }
 
@@ -2100,11 +2220,9 @@
     __ mov(t1, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, gt, t1, Operand(a0));
-    __ sll(t2, t0, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t2));
-    __ sll(t2, t1, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
     __ sw(at, MemOperand(t2));
     __ Addu(t0, t0, Operand(1));
     __ Addu(t1, t1, Operand(1));
@@ -2121,11 +2239,9 @@
     __ bind(&loop);
     __ Subu(t0, t0, Operand(1));
     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
-    __ sll(t1, t0, kPointerSizeLog2);
-    __ addu(t1, t1, a2);
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t1));
-    __ sll(t1, a0, kPointerSizeLog2);
-    __ addu(t1, t1, sp);
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t1));
     __ Addu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2143,7 +2259,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the target to call (can be any Object).
@@ -2153,12 +2270,23 @@
   __ JumpIfSmi(a1, &non_callable);
   __ bind(&non_smi);
   __ GetObjectType(a1, t1, t2);
-  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-  __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Check if target has a [[Call]] internal method.
+  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
   __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ Push(a1);
   // Increase the arguments size to include the pushed function and the
@@ -2171,18 +2299,13 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsCallable));
-  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
   // Overwrite the original receiver with the (original) target.
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(at, sp, at);
+  __ Lsa(at, sp, a0, kPointerSizeLog2);
   __ sw(a1, MemOperand(at));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2264,11 +2387,9 @@
     __ mov(t1, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, ge, t1, Operand(a0));
-    __ sll(t2, t0, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t2));
-    __ sll(t2, t1, kPointerSizeLog2);
-    __ addu(t2, t2, sp);
+    __ Lsa(t2, sp, t1, kPointerSizeLog2);
     __ sw(at, MemOperand(t2));
     __ Addu(t0, t0, Operand(1));
     __ Addu(t1, t1, Operand(1));
@@ -2285,11 +2406,9 @@
     __ bind(&loop);
     __ Subu(t0, t0, Operand(1));
     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
-    __ sll(t1, t0, kPointerSizeLog2);
-    __ addu(t1, t1, a2);
+    __ Lsa(t1, a2, t0, kPointerSizeLog2);
     __ lw(at, MemOperand(t1));
-    __ sll(t1, a0, kPointerSizeLog2);
-    __ addu(t1, t1, sp);
+    __ Lsa(t1, sp, a0, kPointerSizeLog2);
     __ sw(at, MemOperand(t1));
     __ Addu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2368,8 +2487,7 @@
   // Called Construct on an exotic Object with a [[Construct]] internal method.
   {
     // Overwrite the original receiver with the (original) target.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(at, sp, at);
+    __ Lsa(at, sp, a0, kPointerSizeLog2);
     __ sw(a1, MemOperand(at));
     // Let the "call_as_constructor_delegate" take care of the rest.
     __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
@@ -2412,8 +2530,7 @@
     ArgumentAdaptorStackCheck(masm, &stack_overflow);
 
     // Calculate copy start address into a0 and copy end address into t1.
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
     // Adjust for return address and receiver.
     __ Addu(a0, a0, Operand(2 * kPointerSize));
     // Compute copy end address.
@@ -2468,8 +2585,7 @@
     // a1: function
     // a2: expected number of arguments
     // a3: new target (passed through to callee)
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
     // Adjust for return address and receiver.
     __ Addu(a0, a0, Operand(2 * kPointerSize));
     // Compute copy end address. Also adjust for return address.
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index f88d3bd..541e73e 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -91,9 +91,8 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cc, Strength strength);
+                                          Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
@@ -275,7 +274,7 @@
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cc, Strength strength) {
+                                          Condition cc) {
   Label not_identical;
   Label heap_number, return_equal;
   Register exp_mask_reg = t5;
@@ -296,29 +295,15 @@
     __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics, since
-      // we need to throw a TypeError. Smis have already been ruled out.
-      __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
-      __ And(t4, t4, Operand(kIsNotStringMask));
-      __ Branch(slow, ne, t4, Operand(zero_reg));
-    }
   } else {
     __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
     // Comparing JS objects with <=, >= is complicated.
     if (cc != eq) {
-    __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
-    // Call runtime on identical symbols since we need to throw a TypeError.
-    __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics,
-      // since we need to throw a TypeError. Smis and heap numbers have
-      // already been ruled out.
-      __ And(t4, t4, Operand(kIsNotStringMask));
-      __ Branch(slow, ne, t4, Operand(zero_reg));
-    }
+      __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
+      // Call runtime on identical symbols since we need to throw a TypeError.
+      __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+      // Call runtime on identical SIMD values since we must throw a TypeError.
+      __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -514,45 +499,55 @@
 
 // Fast negative check for internalized-to-internalized equality.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
-                                                     Register lhs,
-                                                     Register rhs,
+                                                     Register lhs, Register rhs,
                                                      Label* possible_strings,
-                                                     Label* not_both_strings) {
+                                                     Label* runtime_call) {
   DCHECK((lhs.is(a0) && rhs.is(a1)) ||
          (lhs.is(a1) && rhs.is(a0)));
 
   // a2 is object type of rhs.
-  Label object_test;
+  Label object_test, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ And(at, a2, Operand(kIsNotStringMask));
   __ Branch(&object_test, ne, at, Operand(zero_reg));
   __ And(at, a2, Operand(kIsNotInternalizedMask));
   __ Branch(possible_strings, ne, at, Operand(zero_reg));
   __ GetObjectType(rhs, a3, a3);
-  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
   __ And(at, a3, Operand(kIsNotInternalizedMask));
   __ Branch(possible_strings, ne, at, Operand(zero_reg));
 
-  // Both are internalized strings. We already checked they weren't the same
-  // pointer so they are not equal.
+  // Both are internalized. We already checked they weren't the same pointer so
+  // they are not equal. Return non-equal by returning the non-zero object
+  // pointer in v0.
   __ Ret(USE_DELAY_SLOT);
-  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ mov(v0, a0);  // In delay slot.
 
   __ bind(&object_test);
-  __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ GetObjectType(rhs, a2, a3);
-  __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&undetectable, ne, at, Operand(zero_reg));
+  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&return_unequal, ne, at, Operand(zero_reg));
 
-  // If both objects are undetectable, they are equal.  Otherwise, they
-  // are not equal, since they are different objects and an object is not
-  // equal to undefined.
-  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
-  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
-  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
-  __ and_(a0, a2, a3);
-  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+  __ GetInstanceType(a2, a2);
+  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ GetInstanceType(a3, a3);
+  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in v0.
   __ Ret(USE_DELAY_SLOT);
-  __ xori(v0, a0, 1 << Map::kIsUndetectable);
+  __ mov(v0, a0);  // In delay slot.
+
+  __ bind(&undetectable);
+  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&return_unequal, eq, at, Operand(zero_reg));
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(EQUAL));  // In delay slot.
 }
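
The restructured fast path encodes the abstract-equality rules for undetectable objects (of which document.all is the canonical example): two undetectable operands compare equal, while an undetectable operand never equals an ordinary receiver. As a condensed truth-table sketch (illustrative; the "fall through to the runtime" cases are collapsed into false):

    // lhs_undetectable / rhs_undetectable correspond to the
    // Map::kIsUndetectable bit tests above.
    bool UndetectableFastPathEqual(bool lhs_undetectable,
                                   bool rhs_undetectable) {
      if (lhs_undetectable) return rhs_undetectable;
      return false;  // &return_unequal / runtime fallback otherwise
    }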
 
 
@@ -603,7 +598,7 @@
 
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+  EmitIdenticalObjectComparison(masm, &slow, cc);
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
@@ -723,13 +718,21 @@
   // Never falls through to here.
 
   __ bind(&slow);
-  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
-  // a1 (rhs) second.
-  __ Push(lhs, rhs);
-  // Figure out which native to call and setup the arguments.
   if (cc == eq) {
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+    __ Ret(USE_DELAY_SLOT);
+    __ subu(v0, v0, a0);  // In delay slot.
   } else {
+    // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+    // a1 (rhs) second.
+    __ Push(lhs, rhs);
     int ncr;  // NaN compare result.
     if (cc == lt || cc == le) {
       ncr = GREATER;
@@ -742,8 +745,7 @@
 
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ bind(&miss);
@@ -973,7 +975,6 @@
   __ cvt_d_w(double_exponent, single_scratch);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -987,7 +988,6 @@
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     DCHECK(heapnumber.is(v0));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ DropAndRet(2);
   } else {
     __ push(ra);
@@ -1003,7 +1003,6 @@
     __ MovFromFloatResult(double_result);
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret();
   }
 }
@@ -1075,8 +1074,7 @@
     __ mov(s1, a2);
   } else {
     // Compute the argv pointer in a callee-saved register.
-    __ sll(s1, a0, kPointerSizeLog2);
-    __ Addu(s1, sp, s1);
+    __ Lsa(s1, sp, a0, kPointerSizeLog2);
     __ Subu(s1, s1, kPointerSize);
   }
 
@@ -1092,48 +1090,77 @@
   // a0 = argc
   __ mov(s0, a0);
   __ mov(s2, a1);
-  // a1 = argv (set in the delay slot after find_ra below).
 
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
   // also need to reserve the 4 argument slots on the stack.
 
   __ AssertStackIsAligned();
 
-  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+  int result_stack_size;
+  if (result_size() <= 2) {
+    // a0 = argc, a1 = argv, a2 = isolate
+    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+    __ mov(a1, s1);
+    result_stack_size = 0;
+  } else {
+    DCHECK_EQ(3, result_size());
+    // Allocate additional space for the result.
+    result_stack_size =
+        ((result_size() * kPointerSize) + frame_alignment_mask) &
+        ~frame_alignment_mask;
+    __ Subu(sp, sp, Operand(result_stack_size));
+
+    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+    __ mov(a2, s1);
+    __ mov(a1, a0);
+    __ mov(a0, sp);
+  }
 
   // To let the GC traverse the return address of the exit frames, we need to
   // know where the return address is. The CEntryStub is unmovable, so
   // we can store the address on the stack to be able to find it again and
   // we never have to restore it, because it will not change.
   { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-    // This branch-and-link sequence is needed to find the current PC on mips,
-    // saved to the ra register.
-    // Use masm-> here instead of the double-underscore macro since extra
-    // coverage code can interfere with the proper calculation of ra.
+    int kNumInstructionsToJump = 4;
     Label find_ra;
-    masm->bal(&find_ra);  // bal exposes branch delay slot.
-    masm->mov(a1, s1);
-    masm->bind(&find_ra);
-
     // Adjust the value in ra to point to the correct return location, 2nd
     // instruction past the real call into C code (the jalr(t9)), and push it.
     // This is the return address of the exit frame.
-    const int kNumInstructionsToJump = 5;
-    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
-    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    if (kArchVariant >= kMips32r6) {
+      __ addiupc(ra, kNumInstructionsToJump + 1);
+    } else {
+      // This branch-and-link sequence is needed to find the current PC on mips
+      // before r6, saved to the ra register.
+      __ bal(&find_ra);  // bal exposes branch delay slot.
+      __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+    }
+    __ bind(&find_ra);
+
+    // This spot was reserved in EnterExitFrame.
+    __ sw(ra, MemOperand(sp, result_stack_size));
     // Stack space reservation moved to the branch delay slot below.
     // Stack is still aligned.
 
     // Call the C routine.
-    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
-    masm->jalr(t9);
+    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    __ jalr(t9);
     // Set up sp in the delay slot.
-    masm->addiu(sp, sp, -kCArgsSlotsSize);
+    __ addiu(sp, sp, -kCArgsSlotsSize);
     // Make sure the stored 'ra' points to this position.
     DCHECK_EQ(kNumInstructionsToJump,
               masm->InstructionsGeneratedSince(&find_ra));
   }
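
The DCHECK pins the distance between find_ra and the end of the sequence, so the stored return address is computable without a second bal. The arithmetic, as a sketch (the 4-byte MIPS32 instruction width is an assumption stated here, not taken from the diff):

#include <cstdint>

constexpr int kInstrSize = 4;              // MIPS32 instruction width
constexpr int kNumInstructionsToJump = 4;  // sw, mov, jalr, addiu

// The exit frame's return slot must hold the instruction following the
// jalr's delay slot, i.e. four instructions past find_ra.
uintptr_t StoredReturnAddress(uintptr_t find_ra_pc) {
  return find_ra_pc + kNumInstructionsToJump * kInstrSize;
}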
-
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+    // Read result values stored on stack.
+    __ lw(a0, MemOperand(v0, 2 * kPointerSize));
+    __ lw(v1, MemOperand(v0, 1 * kPointerSize));
+    __ lw(v0, MemOperand(v0, 0 * kPointerSize));
+  }
+  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
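
For a result wider than two words the stub emulates a hidden-result-pointer convention (an o32-style struct return; this reading is an assumption, the diff only shows the register shuffle): a0..a3 shift down by one, sp is passed as the hidden slot, and the callee hands the pointer back in v0. A sketch of the readback:

#include <cstdint>

struct ThreeWords { uint32_t w0, w1, w2; };  // hypothetical runtime result

// The runtime returns the hidden pointer in v0; the stub then loads the
// three words into a0, v1 and v0, matching the lw sequence above.
void ReadBackResult(const ThreeWords* hidden,
                    uint32_t* v0, uint32_t* v1, uint32_t* a0) {
  *a0 = hidden->w2;
  *v1 = hidden->w1;
  *v0 = hidden->w0;
}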
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -1556,303 +1583,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The displacement is the offset of the last parameter (if any)
-  // relative to the frame pointer.
-  const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(a1, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor,
-            eq,
-            a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Check index (a1) against formal parameters count limit passed in
-  // through register a0. Use unsigned comparison to get negative
-  // check for free.
-  __ Branch(&slow, hs, a1, Operand(a0));
-
-  // Read the argument from the stack and return it.
-  __ subu(a3, a0, a1);
-  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, fp, Operand(t3));
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, MemOperand(a3, kDisplacement));
-
-  // Arguments adaptor case: Check index (a1) against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
-  // Read the argument from the adaptor frame and return it.
-  __ subu(a3, a0, a1);
-  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, a2, Operand(t3));
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, MemOperand(a3, kDisplacement));
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ push(a1);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime, ne, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer in the current frame.
-  __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t3, a2, 1);
-  __ Addu(t0, t0, Operand(t3));
-  __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);
-
-  __ bind(&runtime);
-  __ Push(a1, a3, a2);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // Registers used over whole function:
-  //  t1 : arguments count (tagged)
-  //  t2 : mapped parameter count (tagged)
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor_frame, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // No adaptor, parameter count = argument count.
-  __ mov(t1, a2);
-  __ Branch(USE_DELAY_SLOT, &try_allocate);
-  __ mov(t2, a2);  // In delay slot.
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t6, t1, 1);
-  __ Addu(t0, t0, Operand(t6));
-  __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // t1 = argument count (tagged)
-  // t2 = parameter count (tagged)
-  // Compute the mapped parameter count = min(t2, t1) in t2.
-  __ mov(t2, a2);
-  __ Branch(&try_allocate, le, t2, Operand(t1));
-  __ mov(t2, t1);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
-  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
-  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when t2 == 0.
-  __ sll(t5, t2, 1);
-  __ addiu(t5, t5, kParameterMapHeaderSize);
-  __ bind(&param_map_size);
-
-  // 2. Backing store.
-  __ sll(t6, t1, 1);
-  __ Addu(t5, t5, Operand(t6));
-  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
-
-  // v0 = address of new object(s) (tagged)
-  // a2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into t0.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ lw(t0, NativeContextMemOperand());
-  Label skip2_ne, skip2_eq;
-  __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
-  __ lw(t0, MemOperand(t0, kNormalOffset));
-  __ bind(&skip2_ne);
-
-  __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
-  __ lw(t0, MemOperand(t0, kAliasedOffset));
-  __ bind(&skip2_eq);
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (smi-tagged)
-  // t0 = address of arguments map (tagged)
-  // t2 = mapped parameter count (tagged)
-  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ AssertNotSmi(a1);
-  const int kCalleeOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsCalleeIndex * kPointerSize;
-  __ sw(a1, FieldMemOperand(v0, kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(t1);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsLengthIndex * kPointerSize;
-  __ sw(t1, FieldMemOperand(v0, kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, t0 will point there, otherwise
-  // it will point to the backing store.
-  __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
-  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (tagged)
-  // t0 = address of parameter map or backing store (tagged)
-  // t2 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  Label skip3;
-  __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
-  // Move backing store address to a1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(a1, t0);
-  __ bind(&skip3);
-
-  __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
-
-  __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
-  __ Addu(t1, t2, Operand(Smi::FromInt(2)));
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ sll(t6, t2, 1);
-  __ Addu(t1, t0, Operand(t6));
-  __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(t1, t2);
-  __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ Subu(t5, t5, Operand(t2));
-  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ sll(t6, t1, 1);
-  __ Addu(a1, t0, Operand(t6));
-  __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
-
-  // a1 = address of backing store (tagged)
-  // t0 = address of parameter map (tagged)
-  // a0 = temporary scratch (a.o., for address calculation)
-  // t1 = loop variable (tagged)
-  // t3 = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ Subu(t1, t1, Operand(Smi::FromInt(1)));
-  __ sll(a0, t1, 1);
-  __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ Addu(t6, t0, a0);
-  __ sw(t5, MemOperand(t6));
-  __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ Addu(t6, a1, a0);
-  __ sw(t3, MemOperand(t6));
-  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
-
-  // t1 = argument count (tagged).
-  __ lw(t1, FieldMemOperand(v0, kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // v0 = address of new object (tagged)
-  // a1 = address of backing store (tagged)
-  // t1 = argument count (tagged)
-  // t2 = mapped parameter count (tagged)
-  // t5 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
-  __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ sll(t6, t2, 1);
-  __ Subu(a3, a3, Operand(t6));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ Subu(a3, a3, Operand(kPointerSize));
-  __ lw(t0, MemOperand(a3, 0));
-  __ sll(t6, t2, 1);
-  __ Addu(t5, a1, Operand(t6));
-  __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
-  __ Addu(t2, t2, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ Branch(&arguments_loop, lt, t2, Operand(t1));
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // t1 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(a1, a3, t1);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is in ra.
   Label slow;
@@ -1875,122 +1605,6 @@
       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
 
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
-  __ Branch(&try_allocate, ne, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer.
-  __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, t0, Operand(at));
-  __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size
-  // of the arguments object and the elements array in words.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ SmiUntag(t5, a2);
-  __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
-  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
-  __ bind(&add_arguments_object);
-  __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(t5, v0, t0, t1, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
-
-  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(a2);
-  __ sw(a2,
-        FieldMemOperand(v0, JSObject::kHeaderSize +
-                                Heap::kArgumentsLengthIndex * kPointerSize));
-
-  Label done;
-  __ Branch(&done, eq, a2, Operand(zero_reg));
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
-  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
-  __ sw(a2, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  __ SmiUntag(a2);
-
-  // Copy the fixed array slots.
-  Label loop;
-  // Set up t0 to point to the first array slot.
-  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ bind(&loop);
-  // Pre-decrement a3 with kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ Addu(a3, a3, Operand(-kPointerSize));
-  __ lw(t1, MemOperand(a3));
-  // Post-increment t0 with kPointerSize on each iteration.
-  __ sw(t1, MemOperand(t0));
-  __ Addu(t0, t0, Operand(kPointerSize));
-  __ Subu(a2, a2, Operand(1));
-  __ Branch(&loop, ne, a2, Operand(zero_reg));
-
-  // Return.
-  __ bind(&done);
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(a1, a3, a2);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // a1 : rest parameter index (tagged)
-  // Check if the calling frame is an arguments adaptor frame.
-
-  Label runtime;
-  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime, ne, t1,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer.
-  __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, t0, Operand(t1));
-  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(a2, a3, a1);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -2023,8 +1637,7 @@
 
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =
-      ExternalReference::address_of_regexp_stack_memory_address(
-          isolate());
+      ExternalReference::address_of_regexp_stack_memory_address(isolate());
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(isolate());
   __ li(a0, Operand(address_of_regexp_stack_memory_size));
@@ -2076,34 +1689,33 @@
   __ lw(subject, MemOperand(sp, kSubjectOffset));
   __ JumpIfSmi(subject, &runtime);
   __ mov(a3, subject);  // Make a copy of the original subject string.
-  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   // subject: subject string
   // a3: subject string
-  // a0: subject string instance type
   // regexp_data: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
-  // (1) Sequential string?  If yes, go to (5).
-  // (2) Anything but sequential or cons?  If yes, go to (6).
-  // (3) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (4) Is subject external?  If yes, go to (7).
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (6) Not a long external string?  If yes, go to (8).
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
-  //     Go to (5).
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
 
-  Label seq_string /* 5 */, external_string /* 7 */,
-        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
-        not_long_external /* 8 */;
+  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
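
The renumbered plan now re-enters at check_underlying (1) after both the cons and the sliced cases. Its control flow, sketched with hypothetical string types (V8 does this with instance-type bit tests, not virtual dispatch):

enum class Kind { kSeq, kCons, kExternal, kShortExternal, kSliced, kOther };
struct Str {
  Kind kind;
  Str* first;   // cons: first component (only used when flat)
  Str* parent;  // sliced: the underlying string
};

// Returns the string to run the regexp over, or nullptr to bail out to
// the runtime; external strings are later adjusted, offset-wise, to
// look sequential (step 6).
Str* PrepareSubject(Str* subject) {
  for (;;) {                          // (1) check_underlying
    switch (subject->kind) {
      case Kind::kSeq:
      case Kind::kExternal:
        return subject;               // (4) / (5)-(6)
      case Kind::kCons:
        subject = subject->first;     // (3) flat cons: retry from (1)
        continue;
      case Kind::kSliced:
        subject = subject->parent;    // (8) retry from (1)
        continue;
      default:
        return nullptr;               // (7) bail out to runtime
    }
  }
}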
 
-  // (1) Sequential string?  If yes, go to (5).
+  __ bind(&check_underlying);
+  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
   __ And(a1,
          a0,
          Operand(kIsNotStringMask |
@@ -2112,12 +1724,12 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
 
-  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
-  // Go to (6).
+  // Go to (5).
   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
 
   // (3) Cons string.  Check that it's flat.
@@ -2126,19 +1738,9 @@
   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
   __ Branch(&runtime, ne, a0, Operand(a1));
   __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ jmp(&check_underlying);
 
-  // (4) Is subject external?  If yes, go to (7).
-  __ bind(&check_underlying);
-  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(at, a0, Operand(kStringRepresentationMask));
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
-
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (4) Sequential string.  Load regexp code according to encoding.
   __ bind(&seq_string);
   // subject: sequential subject string (or look-alike, external string)
   // a3: original subject string
@@ -2380,12 +1982,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (6) Not a long external string?  If yes, go to (8).
+  // (5) Long external string?  If not, go to (7).
   __ bind(&not_seq_nor_cons);
-  // Go to (8).
+  // Go to (7).
   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
 
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
   __ bind(&external_string);
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
@@ -2407,13 +2009,13 @@
           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
   __ jmp(&seq_string);    // Go to (4).
 
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
   __ bind(&not_long_external);
   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
 
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
   // Load offset into t0 and replace subject string with parent.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ sra(t0, t0, kSmiTagSize);
@@ -2461,8 +2063,7 @@
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into t2.
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, a2, Operand(t2));
+  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
 
   // A monomorphic cache hit or an already megamorphic state: invoke the
@@ -2506,8 +2107,7 @@
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, a2, Operand(t2));
+  __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
   __ jmp(&done);
@@ -2547,8 +2147,7 @@
 
   GenerateRecordCallTarget(masm);
 
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t1, a2, at);
+  __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   Label feedback_register_initialized;
   // Put the AllocationSite from the feedback vector into a2, or undefined.
   __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
@@ -2587,8 +2186,7 @@
   __ li(a0, Operand(arg_count()));
 
   // Increment the call count for monomorphic function calls.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2609,8 +2207,7 @@
   ParameterCount actual(argc);
 
   // The checks. First, does r1 match the recorded monomorphic target?
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
+  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
 
   // We don't know that we have a weak cell. We might have a private symbol
@@ -2635,14 +2232,14 @@
   __ JumpIfSmi(a1, &extra_checks_or_miss);
 
   // Increment the call count for monomorphic function calls.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
   __ bind(&call_function);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
   __ li(a0, Operand(argc));  // In delay slot.
@@ -2676,13 +2273,12 @@
   __ AssertNotSmi(t0);
   __ GetObjectType(t0, t1, t1);
   __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
+  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
 
   __ bind(&call);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
   __ li(a0, Operand(argc));  // In delay slot.
@@ -2708,8 +2304,7 @@
   __ Branch(&miss, ne, t0, Operand(t1));
 
   // Initialize the call counter.
-  __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(at, a2, Operand(at));
+  __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
   __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
   __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
 
@@ -2873,8 +2468,7 @@
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged one-byte char code.
   STATIC_ASSERT(kSmiTag == 0);
-  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(result_, result_, t0);
+  __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
   __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case_, eq, result_, Operand(t0));
@@ -3131,8 +2725,7 @@
 
   // Locate first character of substring to copy.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ sll(t0, a3, 1);
-  __ Addu(t1, t1, t0);
+  __ Lsa(t1, t1, a3, 1);
   // Locate first character of result.
   __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
@@ -3259,6 +2852,39 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in a0.
+  Label is_number;
+  __ JumpIfSmi(a0, &is_number);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_name);
+
+  Label not_heap_number;
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+  __ bind(&not_oddball);
+
+  __ push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
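
A sketch of the fast paths the new ToNameStub covers; the value model and helpers below are illustrative stand-ins, not V8 API:

enum class Type { kSmi, kName, kHeapNumber, kOddball, kOther };
struct Value { Type type; };

Type TypeOf(const Value& v) { return v.type; }
Value NumberToString(Value v);   // stands in for the tail-called stub
Value OddballToString(Value v);  // stands in for Oddball::kToStringOffset
Value RuntimeToName(Value v);    // stands in for Runtime::kToName

Value ToName(Value v) {
  switch (TypeOf(v)) {
    case Type::kName:       return v;                   // names pass through
    case Type::kSmi:
    case Type::kHeapNumber: return NumberToString(v);   // number fast path
    case Type::kOddball:    return OddballToString(v);  // cached string
    default:                return RuntimeToName(v);    // slow path
  }
}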
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3431,18 +3057,14 @@
 
   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
-      __ AssertSmi(a1);
-      __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
-      __ AssertSmi(a0);
-    }
-    __ Ret(USE_DELAY_SLOT);
-    __ Subu(v0, a1, a0);
+  if (!Token::IsEqualityOp(op())) {
+    __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+    __ AssertSmi(a1);
+    __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+    __ AssertSmi(a0);
   }
+  __ Ret(USE_DELAY_SLOT);
+  __ Subu(v0, a1, a0);
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3540,7 +3162,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3770,8 +3392,6 @@
   if (Token::IsEqualityOp(op())) {
     __ Ret(USE_DELAY_SLOT);
     __ subu(v0, a0, a1);
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     if (op() == Token::LT || op() == Token::LTE) {
       __ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3867,15 +3487,13 @@
 
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ sll(at, index, 1);
-    __ Addu(index, index, at);
+    __ Lsa(index, index, index, 1);
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     STATIC_ASSERT(kSmiTagSize == 1);
     Register tmp = properties;
-    __ sll(scratch0, index, 1);
-    __ Addu(tmp, properties, scratch0);
+    __ Lsa(tmp, properties, index, 1);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
 
     DCHECK(!tmp.is(entity_name));
@@ -3965,12 +3583,10 @@
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
 
-    __ sll(at, scratch2, 1);
-    __ Addu(scratch2, scratch2, at);
+    __ Lsa(scratch2, scratch2, scratch2, 1);
 
     // Check if the key is identical to the name.
-    __ sll(at, scratch2, 2);
-    __ Addu(scratch2, elements, at);
+    __ Lsa(scratch2, elements, scratch2, 2);
     __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
     __ Branch(done, eq, name, Operand(at));
   }
@@ -4051,14 +3667,10 @@
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // index *= 3.
-    __ mov(at, index);
-    __ sll(index, index, 1);
-    __ Addu(index, index, at);
-
+    __ Lsa(index, index, index, 1);
 
     STATIC_ASSERT(kSmiTagSize == 1);
-    __ sll(index, index, 2);
-    __ Addu(index, index, dictionary);
+    __ Lsa(index, dictionary, index, 2);
     __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
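
The two Lsa lines above are the dictionary probe-offset arithmetic: NameDictionary entries are kEntrySize == 3 words, so the probe index triples (x + (x << 1)) and is then pointer-scaled before being added to the dictionary base. In plain C++:

#include <cstdint>

constexpr uint32_t kEntrySize = 3;        // words per NameDictionary entry
constexpr uint32_t kPointerSizeLog2 = 2;  // 4-byte pointers on mips32

uint32_t EntryByteOffset(uint32_t index) {
  uint32_t entry = index + (index << 1);  // Lsa(index, index, index, 1): *3
  return entry << kPointerSizeLog2;       // Lsa(index, dictionary, index, 2)
}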
 
     // Having undefined at this place means the name is not contained.
@@ -4158,11 +3770,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     ne,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -4382,8 +3991,7 @@
   //                         aka feedback     scratch2
   // also need receiver_map
   // use cached_map (scratch1) to look in the weak map values.
-  __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, feedback, Operand(at));
+  __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
@@ -4419,8 +4027,7 @@
   __ Branch(try_array, ne, cached_map, Operand(receiver_map));
   Register handler = feedback;
 
-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(handler, vector, Operand(at));
+  __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
   __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4437,8 +4044,7 @@
   Register receiver_map = t1;
   Register scratch1 = t4;
 
-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
 
   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4493,8 +4099,7 @@
   Register receiver_map = t1;
   Register scratch1 = t4;
 
-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
 
   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4530,8 +4135,7 @@
   __ Branch(&miss, ne, key, Operand(feedback));
   // If the name comparison succeeded, we know we have a fixed array with
   // at least one map/handler pair.
-  __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(at));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
@@ -4579,8 +4183,7 @@
   Register receiver_map = t2;
   Register scratch1 = t5;
 
-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
 
   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4652,8 +4255,7 @@
   //             aka feedback                       scratch2
   // also need receiver_map
   // use cached_map (scratch1) to look in the weak map values.
-  __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, feedback, Operand(scratch1));
+  __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(pointer_reg, feedback,
           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
@@ -4702,8 +4304,7 @@
   Register receiver_map = t2;
   Register scratch1 = t5;
 
-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
 
   // Try to quickly handle the monomorphic case without knowing for sure
@@ -4742,8 +4343,7 @@
   __ Branch(&miss, ne, key, Operand(feedback));
   // If the name comparison succeeded, we know we have a fixed array with
   // at least one map/handler pair.
-  __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(feedback, vector, Operand(scratch1));
+  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
   __ lw(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
@@ -5050,8 +4650,7 @@
   switch (argument_count()) {
     case ANY:
     case MORE_THAN_ONE:
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(at, sp, at);
+      __ Lsa(at, sp, a0, kPointerSizeLog2);
       __ sw(a1, MemOperand(at));
       __ li(at, Operand(3));
       __ addu(a0, a0, at);
@@ -5144,6 +4743,592 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : target
+  //  -- a3 : new target
+  //  -- cp : context
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+  __ AssertReceiver(a3);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ GetObjectType(a3, a2, a2);
+  __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Load the initial map and verify that it's in fact a map.
+  __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(a2, &new_object);
+  __ GetObjectType(a2, a0, a0);
+  __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+  __ Branch(&new_object, ne, a0, Operand(a1));
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ sw(a2, MemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, MemOperand(v0, JSObject::kElementsOffset));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ Addu(a1, v0, Operand(JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- v0 : result (untagged)
+  //  -- a1 : result fields (untagged)
+  //  -- t1 : result end (untagged)
+  //  -- a2 : initial map
+  //  -- cp : context
+  //  -- ra : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+  __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+  __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
+  __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(a1, t1, a0);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Ret(USE_DELAY_SLOT);
+    __ Addu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+    __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    __ sll(t0, t0, kPointerSizeLog2);
+    __ subu(t0, t1, t0);
+    __ InitializeFieldsWithFiller(a1, t0, a0);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(a1, t1, a0);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+    __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Addu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Ret();
+
+    // Finalize the instance size.
+    __ bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(v0, a2);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(v0);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
+    __ Push(a2, t0);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(a2);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Subu(v0, v0, Operand(kHeapObjectTag));
+  __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ Lsa(t1, v0, t1, kPointerSizeLog2);
+  __ jmp(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ Push(a1, a3);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
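
A sketch of the slack-tracking split in FastNewObjectStub::Generate, under a simplified model (the counter, unused-fields count, and filler values stand in for the Map bit fields; this is not V8's data layout):

#include <cstdint>

struct MapModel {
  int unused_property_fields;  // reserved slack at the end of the object
  int construction_counter;    // 0 == slack tracking finished
};

void Fill(uint32_t* from, uint32_t* to, uint32_t filler) {
  while (from < to) *from++ = filler;
}

void InitializeBody(uint32_t* fields, uint32_t* end, MapModel* map,
                    uint32_t undefined, uint32_t one_word_filler) {
  if (map->construction_counter == 0) {
    Fill(fields, end, undefined);        // fast path: all fields undefined
    return;
  }
  map->construction_counter--;           // decrease generous allocation count
  uint32_t* used_end = end - map->unused_property_fields;
  Fill(fields, used_end, undefined);     // fields currently in use
  Fill(used_end, end, one_word_filler);  // reserved (slack) fields
  // When the counter reaches zero, Runtime::kFinalizeInstanceSize shrinks
  // the map's instance size to drop the slack.
}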
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make a2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Branch(USE_DELAY_SLOT, &loop_entry);
+    __ mov(a2, fp);  // In delay slot.
+    __ bind(&loop);
+    __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&loop, ne, a1, Operand(a3));
+  }
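
The frame walk above follows caller frame pointers until it finds the frame whose marker slot holds the function itself, i.e. the JavaScript frame; sketched with a hypothetical frame layout:

#include <cstdint>

struct Frame {
  Frame* caller_fp;  // StandardFrameConstants::kCallerFPOffset
  uintptr_t marker;  // kMarkerOffset: holds the JSFunction for JS frames
};

Frame* FindJavaScriptFrame(Frame* fp, uintptr_t function) {
  Frame* f = fp;
  while (f->marker != function) f = f->caller_fp;  // skip handler/stub frames
  return f;
}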
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_rest_parameters, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a1,
+        FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Subu(a0, a0, Operand(a1));
+  __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- ra : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Set up the rest parameter array in v0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+    __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+    __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+    __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+    __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+    __ Move(a1, Smi::FromInt(0));
+    __ Ret(USE_DELAY_SLOT);
+    __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                            1 * kPointerSize));
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- a0 : number of rest parameters (tagged)
+    //  -- a2 : pointer to the first rest parameter
+    //  -- ra : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+    __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Set up the elements array in v0.
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+    __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+    __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+    {
+      Label loop, done_loop;
+      __ sll(at, a0, kPointerSizeLog2 - 1);
+      __ Addu(a1, a3, at);
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a1, Operand(a3));
+      __ lw(at, MemOperand(a2, 0 * kPointerSize));
+      __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+      __ Subu(a2, a2, Operand(1 * kPointerSize));
+      __ Addu(a3, a3, Operand(1 * kPointerSize));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Set up the rest parameter array in a3.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+    __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+    __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a3);  // In delay slot
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(a1);
+      __ Push(a0, a2, a1);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ Pop(a0, a2);
+    }
+    __ jmp(&done_allocate);
+  }
+}
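
The rest-parameter count computed at the top of the stub is just the adaptor frame's actual argument count minus the formal parameter count, clamped at zero; a worked sketch (values untagged for clarity):

int RestParameterCount(int actual_arguments, int formal_parameters) {
  int rest = actual_arguments - formal_parameters;
  return rest > 0 ? rest : 0;  // <= 0 yields the empty rest array path
}
// e.g. 5 actual arguments against 2 declared formals leaves 3 rest
// parameters: RestParameterCount(5, 2) == 3.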
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2,
+        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Lsa(a3, fp, a2, kPointerSizeLog2 - 1);
+  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // a1 : function
+  // a2 : number of parameters (tagged)
+  // a3 : parameters pointer
+  // Registers used over whole function:
+  //  t1 : arguments count (tagged)
+  //  t2 : mapped parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame, eq, a0,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // No adaptor, parameter count = argument count.
+  __ mov(t1, a2);
+  __ Branch(USE_DELAY_SLOT, &try_allocate);
+  __ mov(t2, a2);  // In delay slot.
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Lsa(t0, t0, t1, 1);
+  __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // t1 = argument count (tagged)
+  // t2 = parameter count (tagged)
+  // Compute the mapped parameter count = min(t2, t1) in t2.
+  __ mov(t2, a2);
+  __ Branch(&try_allocate, le, t2, Operand(t1));
+  __ mov(t2, t1);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map: has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  Label param_map_size;
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
+  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when t2 == 0.
+  __ sll(t5, t2, 1);
+  __ addiu(t5, t5, kParameterMapHeaderSize);
+  __ bind(&param_map_size);
+
+  // 2. Backing store.
+  __ Lsa(t5, t5, t1, 1);
+  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
+
+  // v0 = address of new object(s) (tagged)
+  // a2 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into t0.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ lw(t0, NativeContextMemOperand());
+  Label skip2_ne, skip2_eq;
+  __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
+  __ lw(t0, MemOperand(t0, kNormalOffset));
+  __ bind(&skip2_ne);
+
+  __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
+  __ lw(t0, MemOperand(t0, kAliasedOffset));
+  __ bind(&skip2_eq);
+
+  // v0 = address of new object (tagged)
+  // a2 = argument count (smi-tagged)
+  // t0 = address of arguments map (tagged)
+  // t2 = mapped parameter count (tagged)
+  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(a1);
+  __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(t1);
+  __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, t0 will point there, otherwise
+  // it will point to the backing store.
+  __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
+  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // v0 = address of new object (tagged)
+  // a2 = argument count (tagged)
+  // t0 = address of parameter map or backing store (tagged)
+  // t2 = mapped parameter count (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  Label skip3;
+  __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+  // Move backing store address to a1, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(a1, t0);
+  __ bind(&skip3);
+
+  __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
+
+  __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
+  __ Addu(t1, t2, Operand(Smi::FromInt(2)));
+  __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ Lsa(t1, t0, t2, 1);
+  __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
+  __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(t1, t2);
+  __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ Subu(t5, t5, Operand(t2));
+  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+  __ Lsa(a1, t0, t1, 1);
+  __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
+
+  // a1 = address of backing store (tagged)
+  // t0 = address of parameter map (tagged)
+  // a0 = temporary scratch (a.o., for address calculation)
+  // t1 = loop variable (tagged)
+  // t3 = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ Subu(t1, t1, Operand(Smi::FromInt(1)));
+  __ sll(a0, t1, 1);
+  __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ Addu(t6, t0, a0);
+  __ sw(t5, MemOperand(t6));
+  __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ Addu(t6, a1, a0);
+  __ sw(t3, MemOperand(t6));
+  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
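
The loop above walks right to left, writing a context index into the parameter map and the hole into the aliased backing-store slot; a plain C++ sketch (indices untagged, header words dropped):

// Mapped parameter i aliases context slot
// MIN_CONTEXT_SLOTS + parameter_count - 1 - i; the matching backing
// store slot holds the hole so loads are redirected through the map.
void FillParameterMap(int mapped_count, int parameter_count,
                      int min_context_slots, int* map, int* backing,
                      int the_hole) {
  int context_index = parameter_count - mapped_count + min_context_slots;
  for (int i = mapped_count - 1; i >= 0; --i) {
    map[i] = context_index++;
    backing[i] = the_hole;
  }
}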
+
+  // t1 = argument count (tagged).
+  __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+  __ bind(&skip_parameter_map);
+  // v0 = address of new object (tagged)
+  // a1 = address of backing store (tagged)
+  // t1 = argument count (tagged)
+  // t2 = mapped parameter count (tagged)
+  // t5 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+  __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
+  __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ sll(t6, t2, 1);
+  __ Subu(a3, a3, Operand(t6));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ Subu(a3, a3, Operand(kPointerSize));
+  __ lw(t0, MemOperand(a3, 0));
+  __ Lsa(t5, a1, t2, 1);
+  __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
+  __ Addu(t2, t2, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ Branch(&arguments_loop, lt, t2, Operand(t1));
+
+  // Return.
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // t1 = argument count (tagged)
+  __ bind(&runtime);
+  __ Push(a1, a3, t1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
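
For reference, a sketch of the heap layout the fast path above builds (informal and word-level; the slot counts mirror the stores of length = mapped_count + 2 and the two kHeaderSize slots, but field ordering is reconstructed, not quoted from this revision):

  // JSSloppyArgumentsObject: [ map | properties | elements | length | callee ]
  // parameter map (elements when mapped_count > 0):
  //   [ SloppyArgumentsElementsMap | length = mapped_count + 2
  //   | context | backing store | mapped_count slots: context index or hole ]
  // backing store: [ FixedArrayMap | length = argc
  //                | the hole at mapped slots | remaining actual arguments ]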
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make a2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Branch(USE_DELAY_SLOT, &loop_entry);
+    __ mov(a2, fp);  // In delay slot.
+    __ bind(&loop);
+    __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&loop, ne, a1, Operand(a3));
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&arguments_adaptor, eq, a0,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  {
+    __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                            1 * kPointerSize));
+  }
+  __ Branch(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
+    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                            1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- a0 : number of rest parameters (tagged)
+  //  -- a2 : pointer to first rest parameters
+  //  -- ra : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+  __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Setup the elements array in v0.
+  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+  __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+  __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+  {
+    Label loop, done_loop;
+    __ sll(at, a0, kPointerSizeLog2 - 1);
+    __ Addu(a1, a3, at);
+    __ bind(&loop);
+    __ Branch(&done_loop, eq, a1, Operand(a3));
+    __ lw(at, MemOperand(a2, 0 * kPointerSize));
+    __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+    __ Subu(a2, a2, Operand(1 * kPointerSize));
+    __ Addu(a3, a3, Operand(1 * kPointerSize));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Setup the strict arguments object in a3.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+  __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+  __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a3);  // In delay slot
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(a1);
+    __ Push(a0, a2, a1);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(a0, a2);
+  }
+  __ jmp(&done_allocate);
+}
+
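In C++ terms, the allocation size computed into a1 above is (a sketch, not the stub's literal code):

  // int size = JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize +
  //            argc * kPointerSize;
  // argc arrives smi-tagged in a0, which is why the Lsa shift amount is
  // kPointerSizeLog2 - 1 rather than kPointerSizeLog2.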
+
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = cp;
   Register slot_reg = a2;
@@ -5157,8 +5342,7 @@
   }
 
   // Load the PropertyCell value at the specified slot.
-  __ sll(at, slot_reg, kPointerSizeLog2);
-  __ Addu(at, at, Operand(context_reg));
+  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ lw(result_reg, ContextMemOperand(at, 0));
   __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
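
Many hunks in this patch fold a sll/addu pair into the Lsa macro. Its effect, as a sketch (the exact emission depends on the architecture variant):

  // Lsa(rd, rt, rs, sa) computes rd = rt + (rs << sa).
  // On MIPS32R6 this is the single lsa instruction; on older variants the
  // macro expands to the shift-then-add pair it replaces in these hunks.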
 
@@ -5196,8 +5380,7 @@
   }
 
   // Load the PropertyCell at the specified slot.
-  __ sll(at, slot_reg, kPointerSizeLog2);
-  __ Addu(at, at, Operand(context_reg));
+  __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ lw(cell_reg, ContextMemOperand(at, 0));
 
   // Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5424,11 +5607,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- a0                  : callee
   //  -- t0                  : call_data
@@ -5464,8 +5646,10 @@
 
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
-  // Load context from callee.
-  __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // Load context from callee.
+    __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
 
   Register scratch = call_data;
   if (!call_data_undefined) {
@@ -5546,7 +5730,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5554,41 +5738,48 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- sp[0]                  : name
-  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- sp[0]                        : name
+  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- a2                     : api_function_address
+  //  -- a2                           : api_function_address
   // -----------------------------------
 
   Register api_function_address = ApiGetterDescriptor::function_address();
   DCHECK(api_function_address.is(a2));
 
-  __ mov(a0, sp);  // a0 = Handle<Name>
-  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+  __ mov(a0, sp);                              // a0 = Handle<Name>
+  __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
 
   const int kApiStackSpace = 1;
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // a1 (internal::Object** args_) as the data.
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
   __ sw(a1, MemOperand(sp, 1 * kPointerSize));
-  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
+
+  // +3 is to skip prolog, return address and name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            kStackUnwindSpace, kInvalidStackOffset,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           return_value_operand, NULL);
 }
 
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 2a144d9..878ba34 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -767,8 +767,7 @@
   __ Addu(scratch1, elements,
       Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ sll(at, length, 2);
-  __ Addu(array_end, scratch3, at);
+  __ Lsa(array_end, scratch3, length, 2);
 
   // Repurpose registers no longer in use.
   Register hole_lower = elements;
@@ -899,8 +898,7 @@
         FixedDoubleArray::kHeaderSize - kHeapObjectTag
         + Register::kExponentOffset));
   __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ sll(dst_end, dst_end, 1);
-  __ Addu(dst_end, dst_elements, dst_end);
+  __ Lsa(dst_end, dst_elements, dst_end, 1);
 
   // Allocating heap numbers in the loop below can fail and cause a jump to
   // gc_required. We can't leave a partly initialized FixedArray behind,
@@ -1082,8 +1080,7 @@
   __ And(at, result, Operand(kStringEncodingMask));
   __ Branch(&one_byte, ne, at, Operand(zero_reg));
   // Two-byte string.
-  __ sll(at, index, 1);
-  __ Addu(at, string, at);
+  __ Lsa(at, string, index, 1);
   __ lhu(result, MemOperand(at));
   __ jmp(&done);
   __ bind(&one_byte);
@@ -1156,8 +1153,7 @@
 
   // Must not call ExpConstant() after overwriting temp3!
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ sll(at, temp2, 3);
-  __ Addu(temp3, temp3, Operand(at));
+  __ Lsa(temp3, temp3, temp2, 3);
   __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
   __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word loaded is the lower number register.
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 6ca430a..3afb881 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -142,7 +142,7 @@
     case BC:
     case BALC:
     case POP10:  // beqzalc, bovc, beqc
-    case POP30:  // bnezalc, bvnc, bnec
+    case POP30:  // bnezalc, bnvc, bnec
     case POP66:  // beqzc, jic
     case POP76:  // bnezc, jialc
       return true;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 8327501..4914251 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -64,9 +64,13 @@
 #elif defined(FPU_MODE_FP64)
   static const FpuMode kFpuMode = kFP64;
 #elif defined(FPU_MODE_FPXX)
-  static const FpuMode kFpuMode = kFPXX;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
+static const FpuMode kFpuMode = kFPXX;
 #else
-  static const FpuMode kFpuMode = kFP32;
+#error "FPXX is supported only on Mips32R2 and Mips32R6"
+#endif
+#else
+static const FpuMode kFpuMode = kFP32;
 #endif
 
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -92,13 +96,9 @@
 #error Unknown endianness
 #endif
 
-#ifndef FPU_MODE_FPXX
-#define IsFp64Mode() \
-  (kFpuMode == kFP64)
-#else
-#define IsFp64Mode() \
-  (CpuFeatures::IsSupported(FP64FPU))
-#endif
+#define IsFp64Mode() (kFpuMode == kFP64)
+#define IsFp32Mode() (kFpuMode == kFP32)
+#define IsFpxxMode() (kFpuMode == kFPXX)
 
 #ifndef _MIPS_ARCH_MIPS32RX
 #define IsMipsArchVariant(check) \
@@ -390,7 +390,7 @@
   POP10 = ADDI,   // beqzalc, bovc, beqc
   POP26 = BLEZL,  // bgezc, blezc, bgec/blec
   POP27 = BGTZL,  // bgtzc, bltzc, bltc/bgtc
-  POP30 = DADDI,  // bnezalc, bvnc, bnec
+  POP30 = DADDI,  // bnezalc, bnvc, bnec
 };
 
 enum SecondaryField : uint32_t {
@@ -794,6 +794,7 @@
   kDontCheckForInexactConversion
 };
 
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
 
 // -----------------------------------------------------------------------------
 // Hints.
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index a9e30de..0caaa4c 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -80,27 +80,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on MIPS in the input frame.
   return false;
 }
@@ -268,8 +246,7 @@
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
   __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
-  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, t0, a1);  // a1 = one past the last FrameDescription**.
+  __ Lsa(a1, t0, a1, kPointerSizeLog2);
   __ BranchShort(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 936514a..7e0a480 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1500,6 +1500,7 @@
         if (rs_reg >= rt_reg) {
           Format(instr, "bovc  'rs, 'rt, 'imm16s -> 'imm16p4s2");
         } else {
+          DCHECK(rt_reg > 0);
           if (rs_reg == 0) {
             Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
           } else {
@@ -1516,6 +1517,7 @@
         if (rs_reg >= rt_reg) {
           Format(instr, "bnvc  'rs, 'rt, 'imm16s -> 'imm16p4s2");
         } else {
+          DCHECK(rt_reg > 0);
           if (rs_reg == 0) {
             Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
           } else {
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index 3f4fb38..fdb43f3 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -54,20 +54,6 @@
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; }
-
-
 const Register ApiGetterDescriptor::function_address() { return a2; }
 
 
@@ -96,6 +82,32 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1, a3};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
 
 
@@ -165,13 +181,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a3, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1};
@@ -407,6 +416,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -430,7 +446,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 3c866ac..e3544c5 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -159,9 +159,9 @@
                                 Condition cc,
                                 Label* branch) {
   DCHECK(cc == eq || cc == ne);
-  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
-  Branch(branch, cc, scratch,
-         Operand(ExternalReference::new_space_start(isolate())));
+  const int mask =
+      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+  CheckPageFlag(object, scratch, mask, cc, branch);
 }
 
 
@@ -369,6 +369,67 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to update
+  // remembered set. If incremental marking is off, there is nothing for us to
+  // do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(a1));
+  DCHECK(code_entry.is(t0));
+  DCHECK(scratch.is(t1));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
+    lw(at, MemOperand(scratch));
+    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+           Operand(code_entry));
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  const Register dst = scratch;
+  Addu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+  // Save caller-saved registers. js_function and code_entry are in the
+  // caller-saved register list.
+  DCHECK(kJSCallerSaved & js_function.bit());
+  DCHECK(kJSCallerSaved & code_entry.bit());
+  MultiPush(kJSCallerSaved | ra.bit());
+
+  int argument_count = 3;
+
+  PrepareCallCFunction(argument_count, 0, code_entry);
+
+  mov(a0, js_function);
+  mov(a1, dst);
+  li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  MultiPop(kJSCallerSaved | ra.bit());
+
+  bind(&done);
+}
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address,
@@ -499,16 +560,14 @@
   //
   // hash = ~hash + (hash << 15);
   nor(scratch, reg0, zero_reg);
-  sll(at, reg0, 15);
-  addu(reg0, scratch, at);
+  Lsa(reg0, scratch, reg0, 15);
 
   // hash = hash ^ (hash >> 12);
   srl(at, reg0, 12);
   xor_(reg0, reg0, at);
 
   // hash = hash + (hash << 2);
-  sll(at, reg0, 2);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 2);
 
   // hash = hash ^ (hash >> 4);
   srl(at, reg0, 4);
@@ -516,8 +575,7 @@
 
   // hash = hash * 2057;
   sll(scratch, reg0, 11);
-  sll(at, reg0, 3);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 3);
   addu(reg0, reg0, scratch);
 
   // hash = hash ^ (hash >> 16);
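
Collected into C++, the sequence above is the usual integer hash (a sketch mirroring the inline comments; the function name is illustrative only):

  uint32_t ComputeIntegerHashSketch(uint32_t hash) {
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;  // == hash + (hash << 3) + (hash << 11)
    hash = hash ^ (hash >> 16);
    return hash;
  }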
@@ -577,12 +635,10 @@
 
     // Scale the index by multiplying by the element size.
     DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    sll(at, reg2, 1);  // 2x.
-    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
+    Lsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.
 
     // Check if the key is identical to the name.
-    sll(at, reg2, kPointerSizeLog2);
-    addu(reg2, elements, at);
+    Lsa(reg2, elements, reg2, kPointerSizeLog2);
 
     lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
     if (i != kNumberDictionaryProbes - 1) {
@@ -1322,6 +1378,11 @@
   mtc1(t8, fd);
 }
 
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+                                FPURegister scratch) {
+  Trunc_uw_s(fs, t8, scratch);
+  mtc1(t8, fd);
+}
 
 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
   if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
@@ -1399,21 +1460,54 @@
   bind(&done);
 }
 
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+                                FPURegister scratch) {
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(at));
+
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x4F000000);
+  mtc1(at, scratch);
+  // Test if scratch > fd.
+  // If fd < 2^31 we can convert it normally.
+  Label simple_convert;
+  BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+  // First we subtract 2^31 from fd, then trunc it to rs
+  // and add 2^31 to rs.
+  sub_s(scratch, fd, scratch);
+  trunc_w_s(scratch, scratch);
+  mfc1(rs, scratch);
+  Or(rs, rs, 1 << 31);
+
+  Label done;
+  Branch(&done);
+  // Simple conversion.
+  bind(&simple_convert);
+  trunc_w_s(scratch, fd);
+  mfc1(rs, scratch);
+
+  bind(&done);
+}
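
The conversion above follows the same shape as the existing Trunc_uw_d. In C, roughly (a sketch of the algorithm, not the emitted code):

  uint32_t trunc_uw_s_sketch(float x) {
    const float two31 = 2147483648.0f;  // the 0x4F000000 float constant
    if (x < two31) return (uint32_t)(int32_t)x;            // simple_convert
    return ((uint32_t)(int32_t)(x - two31)) | (1u << 31);  // high-bit fixup
  }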
 
 void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
-  if (IsFp64Mode()) {
-    mthc1(rt, fs);
-  } else {
+  if (IsFp32Mode()) {
     mtc1(rt, fs.high());
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    mthc1(rt, fs);
   }
 }
 
 
 void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
-  if (IsFp64Mode()) {
-    mfhc1(rt, fs);
-  } else {
+  if (IsFp32Mode()) {
     mfc1(rt, fs.high());
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    mfhc1(rt, fs);
   }
 }
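
Context for the Mthc1/Mfhc1/FmoveLow changes: in FP32 mode a double lives in an even/odd pair of 32-bit registers, while in FP64 and FPXX modes each register is 64 bits wide (hence the r2/r6 DCHECKs). As a sketch:

  // High word of a double in register fN:
  //   FP32:       mtc1/mfc1 on f(N+1)   -- the odd register of the pair
  //   FP64/FPXX:  mthc1/mfhc1 on fN     -- upper 32 bits of the 64-bit reg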
 
@@ -1619,13 +1713,15 @@
 
 
 void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
-  if (IsFp64Mode()) {
+  if (IsFp32Mode()) {
+    mtc1(src_low, dst);
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
     DCHECK(!src_low.is(at));
     mfhc1(at, dst);
     mtc1(src_low, dst);
     mthc1(at, dst);
-  } else {
-    mtc1(src_low, dst);
   }
 }
 
@@ -3271,7 +3367,7 @@
     return;
   }
 
-  DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+  DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3357,8 +3453,8 @@
 
   // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
   // is not specified. Other registers must not overlap.
-  DCHECK(!AreAliased(object_size, result, scratch, t9));
-  DCHECK(!AreAliased(result_end, result, scratch, t9));
+  DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+  DCHECK(!AreAliased(result_end, result, scratch, t9, at));
   DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
 
   // Check relative positions of allocation top and limit addresses.
@@ -3412,8 +3508,7 @@
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
-    sll(result_end, object_size, kPointerSizeLog2);
-    Addu(result_end, result, result_end);
+    Lsa(result_end, result, object_size, kPointerSizeLog2);
   } else {
     Addu(result_end, result, Operand(object_size));
   }
@@ -3775,8 +3870,7 @@
   lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
 
   bind(&have_double_value);
-  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, elements_reg);
+  Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   sw(mantissa_reg,
       FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
           + kHoleNanLower32Offset));
@@ -3802,8 +3896,7 @@
   Addu(scratch1, elements_reg,
       Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
               elements_offset));
-  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, scratch2);
+  Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   // scratch1 is now effective address of the double element
 
   Register untagged_value = scratch2;
@@ -4059,7 +4152,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -4579,18 +4672,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, a1);
-  InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
@@ -4687,9 +4768,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
@@ -4945,8 +5026,7 @@
     if (argument_count_is_length) {
       addu(sp, sp, argument_count);
     } else {
-      sll(t8, argument_count, kPointerSizeLog2);
-      addu(sp, sp, t8);
+      Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
     }
   }
 
@@ -5160,6 +5240,17 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, t8);
+    Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
   if (emit_debug_code()) {
@@ -5473,8 +5564,7 @@
   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
-  sll(t8, t8, kPointerSizeLog2);
-  Addu(bitmap_reg, bitmap_reg, t8);
+  Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
   li(t8, Operand(1));
   sllv(mask_reg, t8, mask_reg);
 }
@@ -5533,7 +5623,8 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Register null_value = t1;
   Register  empty_fixed_array_value = t2;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Label next, start;
@@ -5547,6 +5638,7 @@
   Branch(
       call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
 
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
   jmp(&start);
 
   bind(&next);
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 4f6a3c8..05a8fec 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -15,6 +15,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_v0};
 const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -207,6 +208,11 @@
               Heap::RootListIndex index,
               BranchDelaySlot bdslot = PROTECT);
 
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with a 'Label* func(size_t index)' declaration.
+  template <typename Func>
+  void GenerateSwitchTable(Register index, size_t case_count,
+                           Func GetLabelFunction);
 #undef COND_ARGS
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -357,7 +363,7 @@
   void JumpIfNotInNewSpace(Register object,
                            Register scratch,
                            Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
+    InNewSpace(object, scratch, eq, branch);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
@@ -365,7 +371,7 @@
   void JumpIfInNewSpace(Register object,
                         Register scratch,
                         Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
+    InNewSpace(object, scratch, ne, branch);
   }
 
   // Check if an object has a given incremental marking color.
@@ -427,6 +433,11 @@
                      pointers_to_here_check_for_value);
   }
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(
       Register object,
       Register map,
@@ -771,6 +782,10 @@
   // Convert unsigned word to double.
   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
 
+  // Convert single to unsigned word.
+  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
   // Convert double to unsigned word.
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
@@ -1054,6 +1069,11 @@
                      Register map,
                      Register type_reg);
 
+  void GetInstanceType(Register object_map, Register object_instance_type) {
+    lbu(object_instance_type,
+        FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+  }
+
   // Check if a map for a JSObject indicates that the object has fast elements.
   // Jump to the specified label if it does not.
   void CheckFastElements(Register map,
@@ -1327,10 +1347,6 @@
   void JumpToExternalReference(const ExternalReference& builtin,
                                BranchDelaySlot bd = PROTECT);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   struct Unresolved {
     int pc;
     uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1486,6 +1502,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1598,7 +1617,7 @@
 
   // Expects object in a0 and returns map with validated enum cache
   // in a0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
+  void CheckEnumCache(Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
   // AllocationMemento object that can be checked for in order to pretransition
@@ -1684,9 +1703,8 @@
                            Register scratch2);
 
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise.
+  void InNewSpace(Register object, Register scratch,
+                  Condition cond,  // ne for new space, eq otherwise.
                   Label* branch);
 
   // Helper for finding the mark bits for an address.  Afterwards, the
@@ -1749,7 +1767,29 @@
   FlushICache flush_cache_;  // Whether to flush the I cache after patching.
 };
 
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+                                         Func GetLabelFunction) {
+  if (kArchVariant >= kMips32r6) {
+    BlockTrampolinePoolFor(case_count + 5);
+    addiupc(at, 5);
+    lsa(at, at, index, kPointerSizeLog2);
+    lw(at, MemOperand(at));
+  } else {
+    Label here;
+    BlockTrampolinePoolFor(case_count + 6);
+    bal(&here);
+    sll(at, index, kPointerSizeLog2);  // Branch delay slot.
+    bind(&here);
+    addu(at, at, ra);
+    lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+  }
+  jr(at);
+  nop();  // Branch delay slot nop.
+  for (size_t index = 0; index < case_count; ++index) {
+    dd(GetLabelFunction(index));
+  }
+}
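
A minimal usage sketch for the template above (the labels and dispatch site are hypothetical; only the lambda shape is prescribed by the header comment):

  Label case0, case1;
  Label* labels[] = {&case0, &case1};
  GenerateSwitchTable(index, arraysize(labels),
                      [&labels](size_t i) { return labels[i]; });
  bind(&case0); /* ... */
  bind(&case1); /* ... */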
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index aa4224a..0c91cb5 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -16,6 +16,7 @@
 #include "src/mips/constants-mips.h"
 #include "src/mips/simulator-mips.h"
 #include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
 
 
 // Only build the simulator if not compiling for real MIPS hardware.
@@ -590,7 +591,8 @@
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int value = *cur;
           Heap* current_heap = sim_->isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & 1) == 0) {
               PrintF("smi %d", value / 2);
@@ -1970,6 +1972,10 @@
                                         int32_t arg4,
                                         int32_t arg5);
 
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
+                                                   int32_t arg2, int32_t arg3,
+                                                   int32_t arg4);
+
 // These prototypes handle the four types of FP calls.
 typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
 typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -2181,7 +2187,29 @@
       SimulatorRuntimeProfilingGetterCall target =
           reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
       target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+    } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+      // builtin call returning ObjectTriple.
+      SimulatorRuntimeTripleCall target =
+          reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF(
+            "Call to host triple returning runtime function %p "
+            "args %08x, %08x, %08x, %08x, %08x\n",
+            FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+      }
+      // arg0 is a hidden argument pointing to the return location, so don't
+      // pass it to the target function.
+      ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+      }
+      // Return is passed back in address pointed to by hidden first argument.
+      ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+      *sim_result = result;
+      set_register(v0, arg0);
     } else {
+      DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+             redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
       SimulatorRuntimeCall target =
                   reinterpret_cast<SimulatorRuntimeCall>(external);
       if (::v8::internal::FLAG_trace_sim) {
@@ -2320,6 +2348,91 @@
            static_cast<int>(e));
 }
 
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+  return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+  return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+  if (std::isnan(a) && std::isnan(b)) {
+    result = a;
+  } else if (std::isnan(a)) {
+    result = b;
+  } else if (std::isnan(b)) {
+    result = a;
+  } else if (b == a) {
+    // Handle -0.0 == 0.0 case.
+    // std::signbit() returns int 0 or 1, so subtracting MaxMinKind::kMax
+    // negates the result.
+    result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+  } else {
+    return false;
+  }
+  return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+  T result;
+  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+    return result;
+  } else {
+    return b < a ? b : a;
+  }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+  T result;
+  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+    return result;
+  } else {
+    return b > a ? b : a;
+  }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+  T result;
+  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+    if (FPAbs(a) < FPAbs(b)) {
+      result = a;
+    } else if (FPAbs(b) < FPAbs(a)) {
+      result = b;
+    } else {
+      result = a < b ? a : b;
+    }
+  }
+  return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+  T result;
+  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+    if (FPAbs(a) > FPAbs(b)) {
+      result = a;
+    } else if (FPAbs(b) > FPAbs(a)) {
+      result = b;
+    } else {
+      result = a > b ? a : b;
+    }
+  }
+  return result;
+}
+
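The templates above fold the per-case NaN and signed-zero handling that the deleted switch arms repeated inline. Expected results, as a sketch of the behaviour they encode (not test code from this patch):

  // FPUMin(+0.0, -0.0) == -0.0        FPUMax(+0.0, -0.0) == +0.0
  // FPUMin(NaN, x) == x, FPUMax(NaN, x) == x  (the non-NaN operand wins)
  // FPUMin(NaN, NaN) is NaN
  // FPUMinA/FPUMaxA compare magnitudes and fall back to value on ties.
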
+// Handle execution based on instruction types.
 
 void Simulator::DecodeTypeRegisterDRsType() {
   double ft, fs, fd;
@@ -2415,72 +2528,19 @@
     }
     case MIN:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
-      }
-      break;
-    case MINA:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        double result;
-        if (fabs(fs) > fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) < fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs < ft ? fs : ft);
-        }
-        set_fpu_register_double(fd_reg(), result);
-      }
-      break;
-    case MAXA:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        double result;
-        if (fabs(fs) < fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) > fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs > ft ? fs : ft);
-        }
-        set_fpu_register_double(fd_reg(), result);
-      }
+      set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
-      }
+      set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
       break;
+    case MINA:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
+      break;
+    case MAXA:
+      DCHECK(IsMipsArchVariant(kMips32r6));
+      set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
       break;
     case ADD_D:
       set_fpu_register_double(fd_reg(), fs + ft);
@@ -3166,71 +3226,19 @@
     }
     case MIN:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
-      }
+      set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
-      }
+      set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
       break;
     case MINA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        float result;
-        if (fabs(fs) > fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) < fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs < ft ? fs : ft);
-        }
-        set_fpu_register_float(fd_reg(), result);
-      }
+      set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        float result;
-        if (fabs(fs) < fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) > fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs > ft ? fs : ft);
-        }
-        set_fpu_register_float(fd_reg(), result);
-      }
+      set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
       break;
     case CVT_L_S: {
       if (IsFp64Mode()) {
@@ -3379,7 +3387,11 @@
       set_register(rt_reg(), get_fpu_register_word(fs_reg()));
       break;
     case MFHC1:
-      set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+      if (IsFp64Mode()) {
+        set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+      } else {
+        set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+      }
       break;
     case CTC1: {
       // At the moment only FCSR is supported.
@@ -3399,7 +3411,11 @@
       set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
       break;
     case MTHC1:
-      set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+      if (IsFp64Mode()) {
+        set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+      } else {
+        set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
+      }
       break;
     case S: {
       DecodeTypeRegisterSRsType();
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 8efe0bb..e1c42fd 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -359,13 +359,13 @@
 
   // Compact branch guard.
   void CheckForbiddenSlot(int32_t current_pc) {
-    Instruction* instr_aftter_compact_branch =
+    Instruction* instr_after_compact_branch =
         reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
-    if (instr_aftter_compact_branch->IsForbiddenInBranchDelay()) {
+    if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
       V8_Fatal(__FILE__, __LINE__,
                "Error: Unexpected instruction 0x%08x immediately after a "
                "compact branch instruction.",
-               *reinterpret_cast<uint32_t*>(instr_aftter_compact_branch));
+               *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
     }
   }
 
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index 09436ed..37ee3a6 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -213,8 +213,8 @@
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -282,10 +282,8 @@
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -349,28 +347,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  Instr instr0 = Assembler::instr_at(pc_);  // lui.
-  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);  // ori.
-  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);  // dsll.
-  Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize);  // ori.
-  Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);  // jalr.
-
-  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
-                         (instr1 & kOpcodeMask) == ORI &&
-                         (instr2 & kFunctionFieldMask) == DSLL &&
-                         (instr3 & kOpcodeMask) == ORI &&
-                         (instr4 & kFunctionFieldMask) == JALR);
-  return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  Instr current_instr = Assembler::instr_at(pc_);
-  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index 9c313a1..f0d3eba 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -3270,10 +3270,9 @@
         bc(&after_pool);
       } else {
         b(&after_pool);
-        nop();
       }
+      nop();
 
-      EmitForbiddenSlotInstruction();
       int pool_start = pc_offset();
       for (int i = 0; i < unbound_labels_count_; i++) {
         { BlockGrowBufferScope block_buf_growth(this);
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index f8d315d..bf2285a 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -306,6 +306,8 @@
 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
 const FPUControlRegister FCSR = { kFCSRRegister };
 
+// TODO(mips64) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
 
 // -----------------------------------------------------------------------------
 // Machine instruction Operands.
@@ -1092,7 +1094,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                        intptr_t pc_delta);
@@ -1272,7 +1274,6 @@
   void EmitForbiddenSlotInstruction() {
     if (IsPrevInstrCompactBranch()) {
       nop();
-      ClearCompactBranchState();
     }
   }
 
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
index 3a9980b..1d8d5d3 100644
--- a/src/mips64/builtins-mips64.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -141,6 +141,109 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments
+  //  -- ra                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in a1 and the double value in f0.
+  __ LoadRoot(a1, root_index);
+  __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+  __ mov(a3, a0);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ Dsubu(a0, a0, Operand(1));
+    __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+    // Load the next parameter tagged value into a2.
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
+    __ ld(a2, MemOperand(at));
+
+    // Load the double value of the parameter into f2, maybe converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(a2, &convert_smi);
+    __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(a0);
+      __ SmiTag(a3);
+      __ Push(a0, a1, a3);
+      __ mov(a0, a2);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(a2, v0);
+      __ Pop(a0, a1, a3);
+      {
+        // Restore the double accumulator value (f0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(a1, &restore_smi);
+        __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+        __ jmp(&done_restore);
+        __ bind(&restore_smi);
+        __ SmiToDoubleFPURegister(a1, f0, a4);
+        __ bind(&done_restore);
+      }
+      __ SmiUntag(a3);
+      __ SmiUntag(a0);
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+    __ jmp(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDoubleFPURegister(a2, f2, a4);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (f0) and the next parameter value on the right hand side (f2).
+    Label compare_equal, compare_nan, compare_swap;
+    __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+    __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+    __ Branch(&loop);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    __ FmoveHigh(a4, reg);
+    // Make a4 unsigned.
+    __ dsll32(a4, a4, 0);
+    __ Branch(&loop, ne, a4, Operand(0x8000000000000000));
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ mov_d(f0, f2);
+    __ mov(a1, a2);
+    __ jmp(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(a1, Heap::kNanValueRootIndex);
+    __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ Dlsa(sp, sp, a3, kPointerSizeLog2);
+  __ mov(v0, a1);
+  __ DropAndRet(1);
+}
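// Editor's sketch (hedged, not part of the patch): the loop above implements
// roughly the following C++, with the ToNumber conversion elided and args/n
// as hypothetical names. NaN poisons the accumulator, and the equal case
// exists only to pick the right zero (-0 for min, +0 for max) via the sign
// bit that the FmoveHigh/dsll32 pair extracts.
//
//   #include <cmath>
//   #include <limits>
//   double MathMaxMin(const double* args, int n, bool is_min) {
//     double acc = is_min ? +INFINITY : -INFINITY;  // default accumulator
//     for (int i = n - 1; i >= 0; --i) {
//       double x = args[i];
//       if (std::isnan(x) || std::isnan(acc)) {
//         acc = std::numeric_limits<double>::quiet_NaN();
//       } else if (x == acc) {
//         if (std::signbit(is_min ? x : acc)) acc = x;  // -0/+0 tie-break
//       } else if (is_min ? (acc >= x) : (acc <= x)) {
//         acc = x;  // new extremum; the stub also swaps the tagged value
//       }
//     }
//     return acc;
//   }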
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0                     : number of arguments
@@ -156,8 +259,7 @@
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Dsubu(a0, a0, Operand(1));
-    __ dsll(a0, a0, kPointerSizeLog2);
-    __ Daddu(sp, a0, sp);
+    __ Dlsa(sp, sp, a0, kPointerSizeLog2);
     __ ld(a0, MemOperand(sp));
     __ Drop(2);
   }
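// Editor's note (hedged): this hunk shows the transformation applied
// throughout the rest of this patch. The two-instruction scale-and-add
// sequence is folded into the Dlsa macro, whose semantics, inferred from its
// uses here, are:
//
//   Dlsa(rd, base, index, sa)   =>   rd = base + (index << sa)
//
// so the hunk above computes sp = sp + a0 * kPointerSize in one step. On
// MIPS64r6 this maps to the native dlsa instruction; earlier ISAs presumably
// expand back to the dsll/daddu pair inside the macro.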
@@ -192,8 +294,7 @@
     Label no_arguments, done;
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Dsubu(a0, a0, Operand(1));
-    __ dsll(a0, a0, kPointerSizeLog2);
-    __ Daddu(sp, a0, sp);
+    __ Dlsa(sp, sp, a0, kPointerSizeLog2);
     __ ld(a0, MemOperand(sp));
     __ Drop(2);
     __ jmp(&done);
@@ -232,8 +333,9 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a0, a1, a3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(a0);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(a0);
   }
   __ Ret(USE_DELAY_SLOT);
@@ -257,8 +359,7 @@
   {
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Dsubu(a0, a0, Operand(1));
-    __ dsll(a0, a0, kPointerSizeLog2);
-    __ Daddu(sp, a0, sp);
+    __ Dlsa(sp, sp, a0, kPointerSizeLog2);
     __ ld(a0, MemOperand(sp));
     __ Drop(2);
   }
@@ -319,8 +420,7 @@
     Label no_arguments, done;
     __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
     __ Dsubu(a0, a0, Operand(1));
-    __ dsll(a0, a0, kPointerSizeLog2);
-    __ Daddu(sp, a0, sp);
+    __ Dlsa(sp, sp, a0, kPointerSizeLog2);
     __ ld(a0, MemOperand(sp));
     __ Drop(2);
     __ jmp(&done);
@@ -361,33 +461,15 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a0, a1, a3);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(a0);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(a0);
   }
   __ Ret(USE_DELAY_SLOT);
   __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));  // In delay slot.
 }
 
-
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- a1 : target function (preserved for callee)
-  //  -- a3 : new target (preserved for callee)
-  // -----------------------------------
-
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the function onto the stack.
-  // Push a copy of the target function and the new target.
-  __ Push(a1, a3, a1);
-
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ Pop(a1, a3);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -395,8 +477,26 @@
   __ Jump(at);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argument count (preserved for callee)
+  //  -- a1 : target function (preserved for callee)
+  //  -- a3 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push a copy of the function onto the stack.
+    // Push a copy of the target function and the new target.
+    __ SmiTag(a0);
+    __ Push(a0, a1, a3, a1);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+    __ CallRuntime(function_id, 1);
+    // Restore target function and new target.
+    __ Pop(a0, a1, a3);
+    __ SmiUntag(a0);
+  }
+
   __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(at);
 }
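// Editor's note (hedged): unlike the removed CallRuntimePassFunction, the
// merged helper above also preserves the argument count in a0. It is
// Smi-tagged before being pushed so the GC sees a valid tagged value on the
// stack during the runtime call. On 64-bit V8 the Smi encoding is roughly:
//
//   intptr_t SmiTag(int32_t v)    { return static_cast<intptr_t>(v) << 32; }
//   int32_t  SmiUntag(intptr_t s) { return static_cast<int32_t>(s >> 32); }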
@@ -412,8 +512,7 @@
   __ LoadRoot(a4, Heap::kStackLimitRootIndex);
   __ Branch(&ok, hs, sp, Operand(a4));
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -422,7 +521,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -444,143 +544,17 @@
     __ Push(a2, a0);
 
     if (create_implicit_receiver) {
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ GetObjectType(a3, a5, a4);
-        __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
-
-        // Load the initial map and verify that it is in fact a map.
-        // a3: new target
-        __ ld(a2,
-              FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
-        __ JumpIfSmi(a2, &rt_call);
-        __ GetObjectType(a2, t1, t0);
-        __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
-        __ Branch(&rt_call, ne, a1, Operand(a5));
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // a1: constructor function
-        // a2: initial map
-        __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-        __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
-
-        // Now allocate the JSObject on the heap.
-        // a1: constructor function
-        // a2: initial map
-        __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-        __ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
-
-        // Allocated the JSObject, now initialize the fields. Map is set to
-        // initial map and properties and elements are set to empty fixed array.
-        // a1: constructor function
-        // a2: initial map
-        // a3: object size
-        // t0: JSObject (not HeapObject tagged - the actual address).
-        // a4: start of next object
-        __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
-        __ mov(t1, t0);
-        STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
-        __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
-        STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
-        __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
-        STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
-        __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
-        STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-        __ Daddu(t1, t1, Operand(3 * kPointerSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ Daddu(t0, t0, Operand(kHeapObjectTag));
-
-        // Fill all the in-object properties with appropriate filler.
-        // t0: JSObject (tagged)
-        // t1: First in-object property of JSObject (not tagged)
-        __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
-          // Check if slack tracking is enabled.
-          __ lwu(t2, bit_field3);
-          __ DecodeField<Map::ConstructionCounter>(a6, t2);
-          // a6: slack tracking counter
-          __ Branch(&no_inobject_slack_tracking, lt, a6,
-                    Operand(Map::kSlackTrackingCounterEnd));
-          // Decrease generous allocation count.
-          __ Dsubu(t2, t2, Operand(1 << Map::ConstructionCounter::kShift));
-          __ sw(t2, bit_field3);
-
-          // Allocate object with a slack.
-          __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-          __ dsll(a0, a0, kPointerSizeLog2);
-          __ dsubu(a0, a4, a0);
-          // a0: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t1,
-                      Operand(a0));
-          }
-          __ InitializeFieldsWithFiller(t1, a0, t3);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(t1, a4, t3);
-
-          // a6: slack tracking counter value before decreasing.
-          __ Branch(&allocated, ne, a6, Operand(Map::kSlackTrackingCounterEnd));
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(a1, a3, t0, a2);
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ Pop(a1, a3, t0);
-
-          // Continue with JSObject being successfully allocated.
-          // a1: constructor function
-          // a3: new target
-          // t0: JSObject
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(t1, a4, t3);
-
-        // Continue with JSObject being successfully allocated.
-        // a1: constructor function
-        // a3: new target
-        // t0: JSObject
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // a1: constructor function
-      // a3: new target
-      __ bind(&rt_call);
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
-      __ Push(a1, a3, a1, a3);  // constructor function, new target
-      __ CallRuntime(Runtime::kNewObject);
+      __ Push(a1, a3);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
       __ mov(t0, v0);
       __ Pop(a1, a3);
 
-      // Receiver for constructor call allocated.
-      // a1: constructor function
-      // a3: new target
-      // t0: JSObject
-      __ bind(&allocated);
-
+      // ----------- S t a t e -------------
+      // -- a1: constructor function
+      // -- a3: new target
+      // -- t0: newly allocated object
+      // -----------------------------------
       __ ld(a0, MemOperand(sp));
     }
     __ SmiUntag(a0);
@@ -610,8 +584,7 @@
     __ mov(t0, a0);
     __ jmp(&entry);
     __ bind(&loop);
-    __ dsll(a4, t0, kPointerSizeLog2);
-    __ Daddu(a4, a2, Operand(a4));
+    __ Dlsa(a4, a2, t0, kPointerSizeLog2);
     __ ld(a5, MemOperand(a4));
     __ push(a5);
     __ bind(&entry);
@@ -677,6 +650,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(v0, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
   __ SmiScale(a4, a1, kPointerSizeLog2);
   __ Daddu(sp, sp, a4);
   __ Daddu(sp, sp, kPointerSize);
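// Editor's note (hedged): the check_derived_construct block above enforces
// the ES6 rule that a derived-class constructor may only produce an object
// (or undefined, yielding the implicit receiver). A Smi result is the cheap
// signal used here that a primitive was returned, e.g. (hypothetical JS):
//
//   class B extends A { constructor() { super(); return 1; } }  // TypeError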
@@ -688,17 +674,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -778,8 +770,7 @@
     // a3: argc
     // s0: argv, i.e. points to first arg
     Label loop, entry;
-    __ dsll(a4, a3, kPointerSizeLog2);
-    __ daddu(a6, s0, a4);
+    __ Dlsa(a6, s0, a3, kPointerSizeLog2);
     __ b(&entry);
     __ nop();   // Branch delay slot nop.
     // a6 points past last arg.
@@ -841,10 +832,8 @@
 //   o sp: stack pointer
 //   o ra: return address
 //
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -853,16 +842,19 @@
 
   __ Push(ra, fp, cp, a1);
   __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-  __ Push(a3);
-
-  // Push zero for bytecode array offset.
-  __ Push(zero_reg);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
   __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  DCHECK(!debug_info.is(a0));
+  __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+  __ Branch(&load_debug_bytecode_array, ne, debug_info,
+            Operand(DebugInfo::uninitialized()));
   __ ld(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
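// Editor's sketch (hedged) of the bytecode-array selection just encoded,
// together with the load_debug_bytecode_array tail further below:
//
//   bytecode_array = (sfi->debug_info != DebugInfo::uninitialized())
//       ? debug_info->abstract_code   // debug copy, see kAbstractCodeIndex
//       : sfi->function_data;         // the original BytecodeArray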
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -874,6 +866,9 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size (word) from the BytecodeArray object.
@@ -904,44 +899,38 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ LoadRoot(at, Heap::kStackLimitRootIndex);
-    __ Branch(&ok, hs, sp, Operand(at));
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load bytecode offset and dispatch table into registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
   __ Daddu(kInterpreterRegisterFileRegister, fp,
            Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ li(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
   __ Daddu(a0, kInterpreterBytecodeArrayRegister,
            kInterpreterBytecodeOffsetRegister);
   __ lbu(a0, MemOperand(a0));
-  __ dsll(at, a0, kPointerSizeLog2);
-  __ Daddu(at, kInterpreterDispatchTableRegister, at);
+  __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
   __ ld(at, MemOperand(at));
  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
   // and header removal.
   __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(at);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  __ ld(kInterpreterBytecodeArrayRegister,
+        FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ Branch(&bytecode_array_loaded);
 }
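// Editor's sketch (hedged) of the dispatch sequence above:
//
//   uint8_t bc = bytecode_array[bytecode_offset];        // lbu
//   Code* handler = dispatch_table[bc];                  // Dlsa + ld
//   call(handler + Code::kHeaderSize - kHeapObjectTag);  // skip Code header
//
// Note the table is now read through an isolate external reference instead of
// the old root-list FixedArray, hence the li() replacing LoadRoot()/Daddu().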
 
 
@@ -966,7 +955,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a2 : the address of the first argument to be pushed. Subsequent
@@ -991,7 +981,9 @@
   __ Branch(&loop_header, gt, a2, Operand(a3));
 
   // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -1026,47 +1018,24 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(a1);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use this for interpreter deopts).
-  __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ Daddu(kInterpreterRegisterFileRegister, fp,
            Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ li(kInterpreterDispatchTableRegister,
+        Operand(ExternalReference::interpreter_dispatch_table_address(
+            masm->isolate())));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ ld(kContextRegister,
         MemOperand(kInterpreterRegisterFileRegister,
                    InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ ld(a1,
-        MemOperand(kInterpreterRegisterFileRegister,
-                   InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ ld(kInterpreterBytecodeArrayRegister,
-        FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+  __ ld(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1089,14 +1058,36 @@
   __ Daddu(a1, kInterpreterBytecodeArrayRegister,
            kInterpreterBytecodeOffsetRegister);
   __ lbu(a1, MemOperand(a1));
-  __ dsll(a1, a1, kPointerSizeLog2);
-  __ Daddu(a1, kInterpreterDispatchTableRegister, a1);
+  __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
   __ ld(a1, MemOperand(a1));
   __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a1);
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use this for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -1111,22 +1102,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as the return address.
+  // This simulates the initial call to bytecode handlers in the interpreter
+  // entry trampoline. The return will never actually be taken, but our stack
+  // walker uses this address to determine whether a frame is interpreted.
+  __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1346,13 +1345,12 @@
 
   // Load the next prototype.
   __ bind(&next_prototype);
-  __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
-  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
-  __ DecodeField<Map::IsHiddenPrototype>(scratch);
+  __ DecodeField<Map::HasHiddenPrototype>(scratch);
   __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+  __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ Branch(&prototype_loop_start);
 
@@ -1377,8 +1375,7 @@
 
   // Do the compatible receiver check
   Label receiver_check_failed;
-  __ sll(at, a0, kPointerSizeLog2);
-  __ Daddu(t8, sp, at);
+  __ Dlsa(t8, sp, a0, kPointerSizeLog2);
   __ ld(t0, MemOperand(t8));
   CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
 
@@ -1512,6 +1509,7 @@
     Register scratch = a4;
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
     __ mov(a3, a2);
+    // Dlsa() cannot be used here, as the scratch value is used later.
     __ dsll(scratch, a0, kPointerSizeLog2);
     __ Daddu(a0, sp, Operand(scratch));
     __ ld(a1, MemOperand(a0));  // receiver
@@ -1582,8 +1580,7 @@
 
   // 2. Get the function to call (passed as receiver) from the stack.
   // a0: actual number of arguments
-  __ dsll(at, a0, kPointerSizeLog2);
-  __ daddu(at, sp, at);
+  __ Dlsa(at, sp, a0, kPointerSizeLog2);
   __ ld(a1, MemOperand(at));
 
   // 3. Shift arguments and return address one slot down on the stack
@@ -1594,8 +1591,7 @@
   {
     Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
-    __ dsll(at, a0, kPointerSizeLog2);
-    __ daddu(a2, sp, at);
+    __ Dlsa(a2, sp, a0, kPointerSizeLog2);
 
     __ bind(&loop);
     __ ld(at, MemOperand(a2, -kPointerSize));
@@ -1695,6 +1691,7 @@
     Register scratch = a4;
     __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
     __ mov(a2, a1);
+    // Dlsa() cannot be used here, as the scratch value is used later.
     __ dsll(scratch, a0, kPointerSizeLog2);
     __ Daddu(a0, sp, Operand(scratch));
     __ sd(a2, MemOperand(a0));  // receiver
@@ -1850,9 +1847,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ ld(a2,
-          FieldMemOperand(a0, JSObject::kHeaderSize +
-                                  Heap::kArgumentsLengthIndex * kPointerSize));
+    __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
     __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
     __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
     __ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1906,8 +1901,7 @@
     Label done, loop;
     __ bind(&loop);
     __ Branch(&done, eq, a4, Operand(a2));
-    __ dsll(at, a4, kPointerSizeLog2);
-    __ Daddu(at, a0, at);
+    __ Dlsa(at, a0, a4, kPointerSizeLog2);
     __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
     __ Push(at);
     __ Daddu(a4, a4, Operand(1));
@@ -1927,10 +1921,133 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ li(at, Operand(debug_is_active));
+  __ lb(scratch1, MemOperand(at));
+  __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&no_interpreter_frame, ne, scratch3,
+              Operand(Smi::FromInt(StackFrame::STUB)));
+    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_arguments_adaptor, ne, scratch3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(fp, scratch2);
+  __ ld(scratch1,
+        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ Branch(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ ld(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(scratch1,
+        FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(scratch1,
+        FieldMemOperand(scratch1,
+                        SharedFunctionInfo::kFormalParameterCountOffset));
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of the destination area where we will put the arguments
+  // after we drop the current frame. We add kPointerSize to count the receiver
+  // argument, which is not included in the formal parameter count.
+  Register dst_reg = scratch2;
+  __ Dlsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+  __ Daddu(dst_reg, dst_reg,
+           Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = scratch1;
+  __ Dlsa(src_reg, sp, args_reg, kPointerSizeLog2);
+  // Count receiver argument as well (not included in args_reg).
+  __ Daddu(src_reg, src_reg, Operand(kPointerSize));
+
+  if (FLAG_debug_code) {
+    __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Now copy the callee's arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch3;
+  Label loop, entry;
+  __ Branch(&entry);
+  __ bind(&loop);
+  __ Dsubu(src_reg, src_reg, Operand(kPointerSize));
+  __ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
+  __ ld(tmp_reg, MemOperand(src_reg));
+  __ sd(tmp_reg, MemOperand(dst_reg));
+  __ bind(&entry);
+  __ Branch(&loop, ne, sp, Operand(src_reg));
+
+  // Leave current frame.
+  __ mov(sp, dst_reg);
+
+  __ bind(&done);
+}
+}  // namespace
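// Editor's sketch (hedged) of the frame surgery above, as pseudo-C++ with
// word-sized pointers (dst/src mirror dst_reg/src_reg):
//
//   word* dst = fp + formal_count * 8 + kCallerSPOffset + 8;  // incl. receiver
//   word* src = sp + (args_reg + 1) * 8;                      // incl. receiver
//   ra = caller_pc; fp = caller_fp;      // restore before the copy clobbers them
//   while (src != sp) *--dst = *--src;   // backwards; areas may overlap
//   sp = dst;                            // callee args now replace g()'s frame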
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the function to call (checked to be a JSFunction)
@@ -1970,8 +2087,7 @@
       __ LoadGlobalProxy(a3);
     } else {
       Label convert_to_object, convert_receiver;
-      __ dsll(at, a0, kPointerSizeLog2);
-      __ daddu(at, sp, at);
+      __ Dlsa(at, sp, a0, kPointerSizeLog2);
       __ ld(a3, MemOperand(at));
       __ JumpIfSmi(a3, &convert_to_object);
       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2007,8 +2123,7 @@
       __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
       __ bind(&convert_receiver);
     }
-    __ dsll(at, a0, kPointerSizeLog2);
-    __ daddu(at, sp, at);
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
     __ sd(a3, MemOperand(at));
   }
   __ bind(&done_convert);
@@ -2020,6 +2135,10 @@
   //  -- cp : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   __ lw(a2,
         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
   ParameterCount actual(a0);
@@ -2038,18 +2157,22 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(a1);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   // Patch the receiver to [[BoundThis]].
   {
     __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
-    __ dsll(a4, a0, kPointerSizeLog2);
-    __ daddu(a4, a4, sp);
+    __ Dlsa(a4, sp, a0, kPointerSizeLog2);
     __ sd(at, MemOperand(a4));
   }
 
@@ -2090,11 +2213,9 @@
     __ mov(a5, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, gt, a5, Operand(a0));
-    __ dsll(a6, a4, kPointerSizeLog2);
-    __ daddu(a6, a6, sp);
+    __ Dlsa(a6, sp, a4, kPointerSizeLog2);
     __ ld(at, MemOperand(a6));
-    __ dsll(a6, a5, kPointerSizeLog2);
-    __ daddu(a6, a6, sp);
+    __ Dlsa(a6, sp, a5, kPointerSizeLog2);
     __ sd(at, MemOperand(a6));
     __ Daddu(a4, a4, Operand(1));
     __ Daddu(a5, a5, Operand(1));
@@ -2111,11 +2232,9 @@
     __ bind(&loop);
     __ Dsubu(a4, a4, Operand(1));
     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
-    __ dsll(a5, a4, kPointerSizeLog2);
-    __ daddu(a5, a5, a2);
+    __ Dlsa(a5, a2, a4, kPointerSizeLog2);
     __ ld(at, MemOperand(a5));
-    __ dsll(a5, a0, kPointerSizeLog2);
-    __ daddu(a5, a5, sp);
+    __ Dlsa(a5, sp, a0, kPointerSizeLog2);
     __ sd(at, MemOperand(a5));
     __ Daddu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2133,7 +2252,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a1 : the target to call (can be any Object).
@@ -2143,12 +2263,23 @@
   __ JumpIfSmi(a1, &non_callable);
   __ bind(&non_smi);
   __ GetObjectType(a1, t1, t2);
-  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-  __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+  // Check if target has a [[Call]] internal method.
+  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+  __ And(t1, t1, Operand(1 << Map::kIsCallable));
+  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
   __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, a0, t0, t1, t2);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ Push(a1);
   // Increase the arguments size to include the pushed function and the
@@ -2161,18 +2292,13 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-  __ And(t1, t1, Operand(1 << Map::kIsCallable));
-  __ Branch(&non_callable, eq, t1, Operand(zero_reg));
   // Overwrite the original receiver with the (original) target.
-  __ dsll(at, a0, kPointerSizeLog2);
-  __ daddu(at, sp, at);
+  __ Dlsa(at, sp, a0, kPointerSizeLog2);
   __ sd(a1, MemOperand(at));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2253,11 +2379,9 @@
     __ mov(a5, zero_reg);
     __ bind(&loop);
     __ Branch(&done_loop, ge, a5, Operand(a0));
-    __ dsll(a6, a4, kPointerSizeLog2);
-    __ daddu(a6, a6, sp);
+    __ Dlsa(a6, sp, a4, kPointerSizeLog2);
     __ ld(at, MemOperand(a6));
-    __ dsll(a6, a5, kPointerSizeLog2);
-    __ daddu(a6, a6, sp);
+    __ Dlsa(a6, sp, a5, kPointerSizeLog2);
     __ sd(at, MemOperand(a6));
     __ Daddu(a4, a4, Operand(1));
     __ Daddu(a5, a5, Operand(1));
@@ -2274,11 +2398,9 @@
     __ bind(&loop);
     __ Dsubu(a4, a4, Operand(1));
     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
-    __ dsll(a5, a4, kPointerSizeLog2);
-    __ daddu(a5, a5, a2);
+    __ Dlsa(a5, a2, a4, kPointerSizeLog2);
     __ ld(at, MemOperand(a5));
-    __ dsll(a5, a0, kPointerSizeLog2);
-    __ daddu(a5, a5, sp);
+    __ Dlsa(a5, sp, a0, kPointerSizeLog2);
     __ sd(at, MemOperand(a5));
     __ Daddu(a0, a0, Operand(1));
     __ Branch(&loop);
@@ -2357,8 +2479,7 @@
   // Called Construct on an exotic Object with a [[Construct]] internal method.
   {
     // Overwrite the original receiver with the (original) target.
-    __ dsll(at, a0, kPointerSizeLog2);
-    __ daddu(at, sp, at);
+    __ Dlsa(at, sp, a0, kPointerSizeLog2);
     __ sd(a1, MemOperand(at));
     // Let the "call_as_constructor_delegate" take care of the rest.
     __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 2531d6b..28812ad 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -90,9 +90,8 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cc, Strength strength);
+                                          Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
@@ -273,7 +272,7 @@
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cc, Strength strength) {
+                                          Condition cc) {
   Label not_identical;
   Label heap_number, return_equal;
   Register exp_mask_reg = t1;
@@ -294,13 +293,6 @@
     __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics, since
-      // we need to throw a TypeError. Smis have already been ruled out.
-      __ Branch(&return_equal, eq, t0, Operand(HEAP_NUMBER_TYPE));
-      __ And(t0, t0, Operand(kIsNotStringMask));
-      __ Branch(slow, ne, t0, Operand(zero_reg));
-    }
   } else {
     __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
     // Comparing JS objects with <=, >= is complicated.
@@ -310,13 +302,6 @@
       __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
-      if (is_strong(strength)) {
-        // Call the runtime on anything that is converted in the semantics,
-        // since we need to throw a TypeError. Smis and heap numbers have
-        // already been ruled out.
-        __ And(t0, t0, Operand(kIsNotStringMask));
-        __ Branch(slow, ne, t0, Operand(zero_reg));
-      }
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -510,45 +495,55 @@
 
 // Fast negative check for internalized-to-internalized equality.
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
-                                                     Register lhs,
-                                                     Register rhs,
+                                                     Register lhs, Register rhs,
                                                      Label* possible_strings,
-                                                     Label* not_both_strings) {
+                                                     Label* runtime_call) {
   DCHECK((lhs.is(a0) && rhs.is(a1)) ||
          (lhs.is(a1) && rhs.is(a0)));
 
   // a2 is object type of rhs.
-  Label object_test;
+  Label object_test, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ And(at, a2, Operand(kIsNotStringMask));
   __ Branch(&object_test, ne, at, Operand(zero_reg));
   __ And(at, a2, Operand(kIsNotInternalizedMask));
   __ Branch(possible_strings, ne, at, Operand(zero_reg));
   __ GetObjectType(rhs, a3, a3);
-  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
   __ And(at, a3, Operand(kIsNotInternalizedMask));
   __ Branch(possible_strings, ne, at, Operand(zero_reg));
 
-  // Both are internalized strings. We already checked they weren't the same
-  // pointer so they are not equal.
+  // Both are internalized. We already checked they weren't the same pointer so
+  // they are not equal. Return non-equal by returning the non-zero object
+  // pointer in v0.
   __ Ret(USE_DELAY_SLOT);
-  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ mov(v0, a0);  // In delay slot.
 
   __ bind(&object_test);
-  __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ GetObjectType(rhs, a2, a3);
-  __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&undetectable, ne, at, Operand(zero_reg));
+  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&return_unequal, ne, at, Operand(zero_reg));
 
-  // If both objects are undetectable, they are equal.  Otherwise, they
-  // are not equal, since they are different objects and an object is not
-  // equal to undefined.
-  __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
-  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
-  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
-  __ and_(a0, a2, a3);
-  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+  __ GetInstanceType(a2, a2);
+  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ GetInstanceType(a3, a3);
+  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in v0.
   __ Ret(USE_DELAY_SLOT);
-  __ xori(v0, a0, 1 << Map::kIsUndetectable);
+  __ mov(v0, a0);  // In delay slot.
+
+  __ bind(&undetectable);
+  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&return_unequal, eq, at, Operand(zero_reg));
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(EQUAL));  // In delay slot.
 }
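// Editor's summary (hedged) of the undetectable handling added above (think
// document.all): under ==, a receiver compares equal only when both sides are
// undetectable; mixed undetectable/ordinary pairs are unequal:
//
//   if (lhs_undetectable || rhs_undetectable)
//     return (lhs_undetectable && rhs_undetectable) ? EQUAL
//                                                   : unequal (any non-zero);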
 
 
@@ -600,7 +595,7 @@
 
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+  EmitIdenticalObjectComparison(masm, &slow, cc);
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
@@ -720,13 +715,21 @@
   // Never falls through to here.
 
   __ bind(&slow);
-  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
-  // a1 (rhs) second.
-  __ Push(lhs, rhs);
-  // Figure out which native to call and setup the arguments.
   if (cc == eq) {
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
+    __ Ret(USE_DELAY_SLOT);
+    __ subu(v0, v0, a0);  // In delay slot.
   } else {
+    // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
+    // a1 (rhs) second.
+    __ Push(lhs, rhs);
     int ncr;  // NaN compare result.
     if (cc == lt || cc == le) {
       ncr = GREATER;
@@ -739,8 +742,7 @@
 
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
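// Editor's note (hedged): ncr pre-loads the answer Runtime::kCompare should
// give when an operand is NaN, since every ordered comparison with NaN must
// evaluate to false:
//
//   cc == lt/le:  seed GREATER (+1)  ->  (+1 <  0), (+1 <= 0) are false
//   cc == gt/ge:  seed LESS   (-1)   ->  (-1 >  0), (-1 >= 0) are false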
 
   __ bind(&miss);
@@ -971,7 +973,6 @@
   __ cvt_d_w(double_exponent, single_scratch);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -985,7 +986,6 @@
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     DCHECK(heapnumber.is(v0));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ DropAndRet(2);
   } else {
     __ push(ra);
@@ -1001,7 +1001,6 @@
     __ MovFromFloatResult(double_result);
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret();
   }
 }
@@ -1073,8 +1072,7 @@
     __ mov(s1, a2);
   } else {
     // Compute the argv pointer in a callee-saved register.
-    __ dsll(s1, a0, kPointerSizeLog2);
-    __ Daddu(s1, sp, s1);
+    __ Dlsa(s1, sp, a0, kPointerSizeLog2);
     __ Dsubu(s1, s1, kPointerSize);
   }
 
@@ -1090,47 +1088,77 @@
   // a0 = argc
   __ mov(s0, a0);
   __ mov(s2, a1);
-  // a1 = argv (set in the delay slot after find_ra below).
 
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
   // also need to reserve the 4 argument slots on the stack.
 
   __ AssertStackIsAligned();
 
-  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  int frame_alignment_mask = frame_alignment - 1;
+  int result_stack_size;
+  if (result_size() <= 2) {
+    // a0 = argc, a1 = argv, a2 = isolate
+    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+    __ mov(a1, s1);
+    result_stack_size = 0;
+  } else {
+    DCHECK_EQ(3, result_size());
+    // Allocate additional space for the result.
+    result_stack_size =
+        ((result_size() * kPointerSize) + frame_alignment_mask) &
+        ~frame_alignment_mask;
+    __ Dsubu(sp, sp, Operand(result_stack_size));
+
+    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+    __ mov(a2, s1);
+    __ mov(a1, a0);
+    __ mov(a0, sp);
+  }
 
   // To let the GC traverse the return address of the exit frames, we need to
   // know where the return address is. The CEntryStub is unmovable, so
   // we can store the address on the stack to be able to find it again and
   // we never have to restore it, because it will not change.
   { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-    // This branch-and-link sequence is needed to find the current PC on mips,
-    // saved to the ra register.
-    // Use masm-> here instead of the double-underscore macro since extra
-    // coverage code can interfere with the proper calculation of ra.
+    int kNumInstructionsToJump = 4;
     Label find_ra;
-    masm->bal(&find_ra);  // bal exposes branch delay slot.
-    masm->mov(a1, s1);
-    masm->bind(&find_ra);
-
     // Adjust the value in ra to point to the correct return location, 2nd
     // instruction past the real call into C code (the jalr(t9)), and push it.
     // This is the return address of the exit frame.
-    const int kNumInstructionsToJump = 5;
-    masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
-    masm->sd(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    if (kArchVariant >= kMips64r6) {
+      __ addiupc(ra, kNumInstructionsToJump + 1);
+    } else {
+      // This branch-and-link sequence is needed to find the current PC on mips
+      // before r6, saved to the ra register.
+      __ bal(&find_ra);  // bal exposes branch delay slot.
+      __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+    }
+    __ bind(&find_ra);
+
+    // This spot was reserved in EnterExitFrame.
+    __ sd(ra, MemOperand(sp, result_stack_size));
     // Stack space reservation moved to the branch delay slot below.
     // Stack is still aligned.
 
     // Call the C routine.
-    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
-    masm->jalr(t9);
+    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    __ jalr(t9);
     // Set up sp in the delay slot.
-    masm->daddiu(sp, sp, -kCArgsSlotsSize);
+    __ daddiu(sp, sp, -kCArgsSlotsSize);
     // Make sure the stored 'ra' points to this position.
     DCHECK_EQ(kNumInstructionsToJump,
               masm->InstructionsGeneratedSince(&find_ra));
   }
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+    // Read result values stored on stack.
+    __ ld(a0, MemOperand(v0, 2 * kPointerSize));
+    __ ld(v1, MemOperand(v0, 1 * kPointerSize));
+    __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+  }
+  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
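// Editor's note (hedged): the N64 ABI returns at most two words in v0/v1, so
// for result_size() == 3 the call is rewritten with a hidden out-parameter,
// conceptually:
//
//   struct Triple { word a, b, c; };
//   Triple* runtime_entry(Triple* ret, int argc, Object** argv, Isolate*);
//   // the returned pointer (v0) is dereferenced above to reload v0, v1, a0
//
// with sp lowered by an alignment-padded result_stack_size to hold the struct.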
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -1246,14 +1274,7 @@
   __ Move(kDoubleRegZero, 0.0);
 
   // Load argv in s0 register.
-  if (kMipsAbi == kN64) {
-    __ mov(s0, a4);  // 5th parameter in mips64 a4 (a4) register.
-  } else {  // Abi O32.
-    // 5th parameter on stack for O32 abi.
-    int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
-    offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
-    __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
-  }
+  __ mov(s0, a4);  // 5th parameter in mips64 a4 (a4) register.
 
   __ InitializeRootRegister();
 
@@ -1558,303 +1579,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The displacement is the offset of the last parameter (if any)
-  // relative to the frame pointer.
-  const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(a1, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor,
-            eq,
-            a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Check index (a1) against formal parameters count limit passed in
-  // through register a0. Use unsigned comparison to get negative
-  // check for free.
-  __ Branch(&slow, hs, a1, Operand(a0));
-
-  // Read the argument from the stack and return it.
-  __ dsubu(a3, a0, a1);
-  __ SmiScale(a7, a3, kPointerSizeLog2);
-  __ Daddu(a3, fp, Operand(a7));
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, MemOperand(a3, kDisplacement));
-
-  // Arguments adaptor case: Check index (a1) against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
-  // Read the argument from the adaptor frame and return it.
-  __ dsubu(a3, a0, a1);
-  __ SmiScale(a7, a3, kPointerSizeLog2);
-  __ Daddu(a3, a2, Operand(a7));
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, MemOperand(a3, kDisplacement));
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ push(a1);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime, ne, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer in the current frame.
-  __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiScale(a7, a2, kPointerSizeLog2);
-  __ Daddu(a4, a4, Operand(a7));
-  __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
-
-  __ bind(&runtime);
-  __ Push(a1, a3, a2);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // Registers used over whole function:
-  //  a5 : arguments count (tagged)
-  //  a6 : mapped parameter count (tagged)
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor_frame, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // No adaptor, parameter count = argument count.
-  __ mov(a5, a2);
-  __ Branch(USE_DELAY_SLOT, &try_allocate);
-  __ mov(a6, a2);  // In delay slot.
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(a4, a4, Operand(t2));
-  __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // a5 = argument count (tagged)
-  // a6 = parameter count (tagged)
-  // Compute the mapped parameter count = min(a6, a5) in a6.
-  __ mov(a6, a2);
-  __ Branch(&try_allocate, le, a6, Operand(a5));
-  __ mov(a6, a5);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
-  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
-  __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
-  __ SmiScale(t1, a6, kPointerSizeLog2);
-  __ daddiu(t1, t1, kParameterMapHeaderSize);
-  __ bind(&param_map_size);
-
-  // 2. Backing store.
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(t1, t1, Operand(t2));
-  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
-
-  // v0 = address of new object(s) (tagged)
-  // a2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into a4.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ ld(a4, NativeContextMemOperand());
-  Label skip2_ne, skip2_eq;
-  __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
-  __ ld(a4, MemOperand(a4, kNormalOffset));
-  __ bind(&skip2_ne);
-
-  __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
-  __ ld(a4, MemOperand(a4, kAliasedOffset));
-  __ bind(&skip2_eq);
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (smi-tagged)
-  // a4 = address of arguments map (tagged)
-  // a6 = mapped parameter count (tagged)
-  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ AssertNotSmi(a1);
-  const int kCalleeOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsCalleeIndex * kPointerSize;
-  __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(a5);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset = JSObject::kHeaderSize +
-      Heap::kArgumentsLengthIndex * kPointerSize;
-  __ sd(a5, FieldMemOperand(v0, kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, a4 will point there, otherwise
-  // it will point to the backing store.
-  __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
-  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (tagged)
-  // a4 = address of parameter map or backing store (tagged)
-  // a6 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  Label skip3;
-  __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
-  // Move backing store address to a1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(a1, a4);
-  __ bind(&skip3);
-
-  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
-
-  __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
-  __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Daddu(a5, a4, Operand(t2));
-  __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(a5, a6);
-  __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ Dsubu(t1, t1, Operand(a6));
-  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(a1, a4, Operand(t2));
-  __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
-
-  // a1 = address of backing store (tagged)
-  // a4 = address of parameter map (tagged)
-  // a0 = temporary scratch (a.o., for address calculation)
-  // t1 = loop variable (tagged)
-  // a7 = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
-  __ SmiScale(a0, a5, kPointerSizeLog2);
-  __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ Daddu(t2, a4, a0);
-  __ sd(t1, MemOperand(t2));
-  __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ Daddu(t2, a1, a0);
-  __ sd(a7, MemOperand(t2));
-  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
-
-  // Restore t1 = argument count (tagged).
-  __ ld(a5, FieldMemOperand(v0, kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // v0 = address of new object (tagged)
-  // a1 = address of backing store (tagged)
-  // a5 = argument count (tagged)
-  // a6 = mapped parameter count (tagged)
-  // t1 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-  __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
-  __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Dsubu(a3, a3, Operand(t2));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ Dsubu(a3, a3, Operand(kPointerSize));
-  __ ld(a4, MemOperand(a3, 0));
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Daddu(t1, a1, Operand(t2));
-  __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
-  __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ Branch(&arguments_loop, lt, a6, Operand(a5));
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // a5 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(a1, a3, a5);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is in ra.
   Label slow;
@@ -1878,122 +1602,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-
-  DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
-  __ Branch(&try_allocate, ne, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer.
-  __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiScale(at, a2, kPointerSizeLog2);
-  __ Daddu(a4, a4, Operand(at));
-  __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size
-  // of the arguments object and the elements array in words.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ SmiUntag(t1, a2);
-  __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
-  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
-  __ bind(&add_arguments_object);
-  __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(t1, v0, a4, a5, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
-
-  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(a2);
-  __ sd(a2,
-        FieldMemOperand(v0, JSObject::kHeaderSize +
-                                Heap::kArgumentsLengthIndex * kPointerSize));
-
-  Label done;
-  __ Branch(&done, eq, a2, Operand(zero_reg));
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
-  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
-  __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  __ SmiUntag(a2);
-
-  // Copy the fixed array slots.
-  Label loop;
-  // Set up a4 to point to the first array slot.
-  __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ bind(&loop);
-  // Pre-decrement a3 with kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ Daddu(a3, a3, Operand(-kPointerSize));
-  __ ld(a5, MemOperand(a3));
-  // Post-increment a4 with kPointerSize on each iteration.
-  __ sd(a5, MemOperand(a4));
-  __ Daddu(a4, a4, Operand(kPointerSize));
-  __ Dsubu(a2, a2, Operand(1));
-  __ Branch(&loop, ne, a2, Operand(zero_reg));
-
-  // Return.
-  __ bind(&done);
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(a1, a3, a2);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // a4 : rest parameter index (tagged)
-  // Check if the calling frame is an arguments adaptor frame.
-
-  Label runtime;
-  __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime, ne, a5,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Patch the arguments.length and the parameters pointer.
-  __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiScale(at, a2, kPointerSizeLog2);
-
-  __ Daddu(a3, a0, Operand(at));
-  __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(a2, a3, a4);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -2078,50 +1686,51 @@
   __ ld(subject, MemOperand(sp, kSubjectOffset));
   __ JumpIfSmi(subject, &runtime);
   __ mov(a3, subject);  // Make a copy of the original subject string.
-  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+
   // subject: subject string
   // a3: subject string
-  // a0: subject string instance type
   // regexp_data: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
-  // (1) Sequential string?  If yes, go to (5).
-  // (2) Anything but sequential or cons?  If yes, go to (6).
-  // (3) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (4) Is subject external?  If yes, go to (7).
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (6) Not a long external string?  If yes, go to (8).
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
-  //     Go to (5).
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
 
-  Label check_underlying;   // (4)
-  Label seq_string;         // (5)
-  Label not_seq_nor_cons;   // (6)
-  Label external_string;    // (7)
-  Label not_long_external;  // (8)
+  Label check_underlying;   // (1)
+  Label seq_string;         // (4)
+  Label not_seq_nor_cons;   // (5)
+  Label external_string;    // (6)
+  Label not_long_external;  // (7)
 
-  // (1) Sequential string?  If yes, go to (5).
+  __ bind(&check_underlying);
+  __ ld(a2, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
   __ And(a1,
          a0,
          Operand(kIsNotStringMask |
                  kStringRepresentationMask |
                  kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
-  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
+  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
 
-  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
-  // Go to (6).
+  // Go to (5).
   __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
 
   // (3) Cons string.  Check that it's flat.
@@ -2130,19 +1739,9 @@
   __ LoadRoot(a1, Heap::kempty_stringRootIndex);
   __ Branch(&runtime, ne, a0, Operand(a1));
   __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ jmp(&check_underlying);
 
-  // (4) Is subject external?  If yes, go to (7).
-  __ bind(&check_underlying);
-  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(at, a0, Operand(kStringRepresentationMask));
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
-
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (4) Sequential string.  Load regexp code according to encoding.
   __ bind(&seq_string);
   // subject: sequential subject string (or look-alike, external string)
   // a3: original subject string
@@ -2182,7 +1781,7 @@
 
   // Isolates: note we add an additional parameter here (isolate pointer).
   const int kRegExpExecuteArguments = 9;
-  const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
+  const int kParameterRegisters = 8;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
 
   // Stack pointer now points to cell where return address is to be written.
@@ -2203,58 +1802,28 @@
   //   [sp + 1] - Argument 5
   //   [sp + 0] - saved ra
 
-  if (kMipsAbi == kN64) {
-    // Argument 9: Pass current isolate address.
-    __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
-    __ sd(a0, MemOperand(sp, 1 * kPointerSize));
+  // Argument 9: Pass current isolate address.
+  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+  __ sd(a0, MemOperand(sp, 1 * kPointerSize));
 
-    // Argument 8: Indicate that this is a direct call from JavaScript.
-    __ li(a7, Operand(1));
+  // Argument 8: Indicate that this is a direct call from JavaScript.
+  __ li(a7, Operand(1));
 
-    // Argument 7: Start (high end) of backtracking stack memory area.
-    __ li(a0, Operand(address_of_regexp_stack_memory_address));
-    __ ld(a0, MemOperand(a0, 0));
-    __ li(a2, Operand(address_of_regexp_stack_memory_size));
-    __ ld(a2, MemOperand(a2, 0));
-    __ daddu(a6, a0, a2);
+  // Argument 7: Start (high end) of backtracking stack memory area.
+  __ li(a0, Operand(address_of_regexp_stack_memory_address));
+  __ ld(a0, MemOperand(a0, 0));
+  __ li(a2, Operand(address_of_regexp_stack_memory_size));
+  __ ld(a2, MemOperand(a2, 0));
+  __ daddu(a6, a0, a2);
 
-    // Argument 6: Set the number of capture registers to zero to force global
-    // regexps to behave as non-global. This does not affect non-global regexps.
-    __ mov(a5, zero_reg);
+  // Argument 6: Set the number of capture registers to zero to force global
+  // regexps to behave as non-global. This does not affect non-global regexps.
+  __ mov(a5, zero_reg);
 
-    // Argument 5: static offsets vector buffer.
-    __ li(a4, Operand(
-          ExternalReference::address_of_static_offsets_vector(isolate())));
-  } else {  // O32.
-    DCHECK(kMipsAbi == kO32);
-
-    // Argument 9: Pass current isolate address.
-    // CFunctionArgumentOperand handles MIPS stack argument slots.
-    __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
-    __ sd(a0, MemOperand(sp, 5 * kPointerSize));
-
-    // Argument 8: Indicate that this is a direct call from JavaScript.
-    __ li(a0, Operand(1));
-    __ sd(a0, MemOperand(sp, 4 * kPointerSize));
-
-    // Argument 7: Start (high end) of backtracking stack memory area.
-    __ li(a0, Operand(address_of_regexp_stack_memory_address));
-    __ ld(a0, MemOperand(a0, 0));
-    __ li(a2, Operand(address_of_regexp_stack_memory_size));
-    __ ld(a2, MemOperand(a2, 0));
-    __ daddu(a0, a0, a2);
-    __ sd(a0, MemOperand(sp, 3 * kPointerSize));
-
-    // Argument 6: Set the number of capture registers to zero to force global
-    // regexps to behave as non-global. This does not affect non-global regexps.
-    __ mov(a0, zero_reg);
-    __ sd(a0, MemOperand(sp, 2 * kPointerSize));
-
-    // Argument 5: static offsets vector buffer.
-    __ li(a0, Operand(
-          ExternalReference::address_of_static_offsets_vector(isolate())));
-    __ sd(a0, MemOperand(sp, 1 * kPointerSize));
-  }
+  // Argument 5: static offsets vector buffer.
+  __ li(
+      a4,
+      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for one_byte and 1 for two byte).
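
With the O32 path gone, the stub can assume the N64 calling convention
throughout: the first eight integer arguments travel in registers a0..a7, so of
the nine regexp-execute arguments only the isolate pointer spills to the stack.
A standalone C++ sketch of that split (plain arithmetic, no V8 types assumed):

    #include <cassert>

    int main() {
      const int kRegExpExecuteArguments = 9;  // as in the stub above
      const int kParameterRegisters = 8;      // N64: a0..a7 carry arguments
      // Slots EnterExitFrame reserves for the arguments that overflow the
      // register file.
      const int kStackArguments =
          kRegExpExecuteArguments - kParameterRegisters;
      assert(kStackArguments == 1);  // only Argument 9 (the isolate) spills
      return 0;
    }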
@@ -2415,12 +1984,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (6) Not a long external string?  If yes, go to (8).
+  // (5) Long external string?  If not, go to (7).
   __ bind(&not_seq_nor_cons);
-  // Go to (8).
+  // Go to (7).
   __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
 
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
   __ bind(&external_string);
   __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
@@ -2440,20 +2009,20 @@
   __ Dsubu(subject,
           subject,
           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-  __ jmp(&seq_string);    // Go to (5).
+  __ jmp(&seq_string);  // Go to (4).
 
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
   __ bind(&not_long_external);
   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
 
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced string.  Replace subject with parent.  Go to (4).
   // Load offset into t0 and replace subject string with parent.
   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(t0);
   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
-  __ jmp(&check_underlying);  // Go to (4).
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
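
The renumbered plan reads most naturally as a loop: the stub keeps unwrapping
cons and sliced strings (re-entering at step (1)) until it reaches something it
can treat as sequential, or bails out to the runtime. A rough standalone C++
model of that control flow (the types and flags are illustrative stand-ins,
not V8's):

    enum Rep { kSeq, kCons, kSliced, kExternal, kNotString };

    struct Str {
      Rep rep;
      Str* first;           // cons: first component
      Str* parent;          // sliced: backing string
      bool flat;            // cons: second component is the empty string
      bool short_external;  // external: short-external representation
    };

    // Returns the string to hand to the compiled regexp code, or nullptr to
    // bail out to the runtime.
    Str* UnwrapSubject(Str* s) {
      for (;;) {                                    // (1) check underlying string
        switch (s->rep) {
          case kSeq:
            return s;                               // (4) sequential: ready
          case kCons:
            if (!s->flat) return nullptr;           // (3) non-flat cons: runtime
            s = s->first;                           //     else take first, go to (1)
            break;
          case kSliced:
            s = s->parent;                          // (8) replace with parent, go to (1)
            break;
          case kExternal:
            if (s->short_external) return nullptr;  // (7) short external: runtime
            return s;                               // (6) offset-wise sequential
          default:
            return nullptr;                         // not a string: runtime
        }
      }
    }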
 
@@ -2719,7 +2288,8 @@
   __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
 
   __ bind(&call_function);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
   __ li(a0, Operand(argc));  // In delay slot.
@@ -2759,7 +2329,7 @@
   __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
 
   __ bind(&call);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
           USE_DELAY_SLOT);
   __ li(a0, Operand(argc));  // In delay slot.
@@ -3163,8 +2733,7 @@
 
   // Locate first character of substring to copy.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ dsll(a4, a3, 1);
-  __ Daddu(a5, a5, a4);
+  __ Dlsa(a5, a5, a3, 1);
   // Locate first character of result.
   __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
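
This and the similar hunks below are a peephole cleanup: the two-instruction
shift-then-add address computation collapses into the single Dlsa macro, which
computes rd = rt + (rs << sa) (using the r6 dlsa instruction where the shift
amount allows, otherwise expanding back to dsll+daddu). A tiny standalone model
of the computation:

    #include <cassert>
    #include <cstdint>

    // Models what the Dlsa macro computes: rd = rt + (rs << sa).
    uint64_t Dlsa(uint64_t rt, uint64_t rs, unsigned sa) {
      return rt + (rs << sa);
    }

    int main() {
      // dsll a4, a3, 1; Daddu a5, a5, a4  ==>  Dlsa a5, a5, a3, 1
      uint64_t base = 0x10000, index = 7;
      assert(Dlsa(base, index, 1) == base + index * 2);
      return 0;
    }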
 
@@ -3291,6 +2860,39 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in a0.
+  Label is_number;
+  __ JumpIfSmi(a0, &is_number);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  // a0: receiver
+  // a1: receiver instance type
+  __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_name);
+
+  Label not_heap_number;
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+  __ bind(&not_oddball);
+
+  __ push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+
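The new ToName stub handles the common cases inline before falling back to the
runtime. Its dispatch order, restated over illustrative stand-in types (not
V8's API):

    #include <string>

    // Stand-in kinds for the instance-type checks the stub performs.
    enum class Kind { kSmi, kName, kHeapNumber, kOddball, kOther };

    // Which path the stub takes for each input kind.
    std::string ToNamePath(Kind k) {
      if (k == Kind::kSmi || k == Kind::kHeapNumber)
        return "tail-call the NumberToString stub";
      if (k == Kind::kName) return "already a name: return it unchanged";
      if (k == Kind::kOddball) return "load the oddball's cached to_string";
      return "push the argument and tail-call Runtime::kToName";
    }
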
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -3463,18 +3065,14 @@
 
   __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
-      __ AssertSmi(a1);
-      __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
-      __ AssertSmi(a0);
-    }
-    __ Ret(USE_DELAY_SLOT);
-    __ Dsubu(v0, a1, a0);
+  if (!Token::IsEqualityOp(op())) {
+    __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+    __ AssertSmi(a1);
+    __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+    __ AssertSmi(a0);
   }
+  __ Ret(USE_DELAY_SLOT);
+  __ Dsubu(v0, a1, a0);
 
   __ bind(&miss);
   GenerateMiss(masm);
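
With strong mode removed, the Boolean CompareIC case always ends in the same
Smi subtraction; for relational operators both sides are first replaced by the
oddballs' cached numeric values. A scalar sketch of the fast path (names are
stand-ins, not V8's):

    // left/right arrive as booleans; the 0/1 mapping models the Smi stored
    // in Oddball::to_number.
    int CompareBooleanFastPath(bool left, bool right, bool is_equality_op) {
      int l = left ? 1 : 0;
      int r = right ? 1 : 0;
      // Equality ops can subtract the raw oddballs directly; relational ops
      // load the cached numbers first. Either way the sign of the result
      // encodes less/equal/greater, just like v0 = a1 - a0 above.
      (void)is_equality_op;
      return l - r;
    }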
@@ -3572,7 +3170,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3802,8 +3400,6 @@
   if (Token::IsEqualityOp(op())) {
     __ Ret(USE_DELAY_SLOT);
     __ dsubu(v0, a0, a1);
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     if (op() == Token::LT || op() == Token::LTE) {
       __ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3899,16 +3495,14 @@
 
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ dsll(at, index, 1);
-    __ Daddu(index, index, at);  // index *= 3.
+    __ Dlsa(index, index, index, 1);  // index *= 3.
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     STATIC_ASSERT(kSmiTagSize == 1);
     Register tmp = properties;
 
-    __ dsll(scratch0, index, kPointerSizeLog2);
-    __ Daddu(tmp, properties, scratch0);
+    __ Dlsa(tmp, properties, index, kPointerSizeLog2);
     __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
 
     DCHECK(!tmp.is(entity_name));
@@ -3997,13 +3591,10 @@
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
-
-    __ dsll(at, scratch2, 1);
-    __ Daddu(scratch2, scratch2, at);
+    __ Dlsa(scratch2, scratch2, scratch2, 1);
 
     // Check if the key is identical to the name.
-    __ dsll(at, scratch2, kPointerSizeLog2);
-    __ Daddu(scratch2, elements, at);
+    __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
     __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
     __ Branch(done, eq, name, Operand(at));
   }
@@ -4084,14 +3675,10 @@
     // Scale the index by multiplying by the entry size.
     STATIC_ASSERT(NameDictionary::kEntrySize == 3);
     // index *= 3.
-    __ mov(at, index);
-    __ dsll(index, index, 1);
-    __ Daddu(index, index, at);
-
+    __ Dlsa(index, index, index, 1);
 
     STATIC_ASSERT(kSmiTagSize == 1);
-    __ dsll(index, index, kPointerSizeLog2);
-    __ Daddu(index, index, dictionary);
+    __ Dlsa(index, dictionary, index, kPointerSizeLog2);
     __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
 
     // Having undefined at this place means the name is not contained.
@@ -4191,11 +3778,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     ne,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -5076,8 +4660,7 @@
   switch (argument_count()) {
     case ANY:
     case MORE_THAN_ONE:
-      __ dsll(at, a0, kPointerSizeLog2);
-      __ Daddu(at, sp, at);
+      __ Dlsa(at, sp, a0, kPointerSizeLog2);
       __ sd(a1, MemOperand(at));
       __ li(at, Operand(3));
       __ Daddu(a0, a0, at);
@@ -5170,6 +4753,609 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : target
+  //  -- a3 : new target
+  //  -- cp : context
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+  __ AssertReceiver(a3);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ GetObjectType(a3, a2, a2);
+  __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Load the initial map and verify that it's in fact a map.
+  __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(a2, &new_object);
+  __ GetObjectType(a2, a0, a0);
+  __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+  __ Branch(&new_object, ne, a0, Operand(a1));
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ sd(a2, MemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, MemOperand(v0, JSObject::kElementsOffset));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ Daddu(a1, v0, Operand(JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- v0 : result (untagged)
+  //  -- a1 : result fields (untagged)
+  //  -- a5 : result end (untagged)
+  //  -- a2 : initial map
+  //  -- cp : context
+  //  -- ra : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+  __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+  __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
+  __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(a1, a5, a0);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Ret(USE_DELAY_SLOT);
+    __ Daddu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+    __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    __ dsll(a4, a4, kPointerSizeLog2);
+    __ Dsubu(a4, a5, a4);
+    __ InitializeFieldsWithFiller(a1, a4, a0);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(a1, a5, a0);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+    __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ Daddu(v0, v0, Operand(kHeapObjectTag));  // In delay slot.
+    __ Ret();
+
+    // Finalize the instance size.
+    __ bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(v0, a2);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(v0);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize == 1);
+    // The shift both scales the size from words to bytes and Smi-tags it,
+    // so no separate SmiTag is needed.
+    __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
+    __ Push(a2, a4);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(a2);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ Dsubu(v0, v0, Operand(kHeapObjectTag));
+  __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+  __ Dlsa(a5, v0, a5, kPointerSizeLog2);
+  __ jmp(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ Push(a1, a3);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
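The slack-tracking path above decrements a small counter packed into
Map::bit_field3 and finalizes the instance size once the counter runs out. A
minimal sketch of that bookkeeping, assuming a hypothetical bit layout (the
real shift and mask come from Map::ConstructionCounter):

    #include <cstdint>

    // Hypothetical layout: a 3-bit counter in the top bits of bit_field3.
    constexpr uint32_t kCounterShift = 29;
    constexpr uint32_t kCounterMask = uint32_t{7} << kCounterShift;

    // Returns true when this allocation used up the counter, i.e. the caller
    // should finalize the instance size (kSlackTrackingCounterEnd reached).
    bool DecrementConstructionCounter(uint32_t* bit_field3) {
      *bit_field3 -= uint32_t{1} << kCounterShift;  // decrease generous count
      return (*bit_field3 & kCounterMask) == 0;
    }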
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make a2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Branch(USE_DELAY_SLOT, &loop_entry);
+    __ mov(a2, fp);  // In delay slot.
+    __ bind(&loop);
+    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&loop, ne, a1, Operand(a3));
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&no_rest_parameters, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ SmiLoadUntag(
+      a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a1,
+        FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Dsubu(a0, a0, Operand(a1));
+  __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- ra : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in v0.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+    __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+    __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+    __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+    __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+    __ Move(a1, Smi::FromInt(0));
+    __ Ret(USE_DELAY_SLOT);
+    __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- a0 : number of rest parameters
+    //  -- a2 : pointer to first rest parameters
+    //  -- ra : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+    __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Compute arguments.length in a4.
+    __ SmiTag(a4, a0);
+
+    // Setup the elements array in v0.
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+    __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+    __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+    {
+      Label loop, done_loop;
+      __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+      __ bind(&loop);
+      __ Branch(&done_loop, eq, a1, Operand(a3));
+      __ ld(at, MemOperand(a2, 0 * kPointerSize));
+      __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+      __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+      __ Daddu(a3, a3, Operand(1 * kPointerSize));
+      __ Branch(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Setup the rest parameter array in a3.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+    __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+    __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a3);  // In delay slot
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(a0);
+      __ SmiTag(a1);
+      __ Push(a0, a2, a1);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ Pop(a0, a2);
+      __ SmiUntag(a0);
+    }
+    __ jmp(&done_allocate);
+  }
+}
+
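The rest-parameter count computed above is simply the surplus of actual
arguments over the function's formal parameter count; no adaptor frame, or no
surplus, means an empty rest array. As plain C++ under the same assumptions:

    // actual_args comes from the adaptor frame, formal_params from the
    // SharedFunctionInfo; mirrors the Dsubu/Branch pair in the stub.
    int RestParameterCount(int actual_args, int formal_params) {
      int surplus = actual_args - formal_params;
      return surplus > 0 ? surplus : 0;
    }
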
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2,
+        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ Dlsa(a3, fp, a2, kPointerSizeLog2);
+  __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ SmiTag(a2);
+
+  // a1 : function
+  // a2 : number of parameters (tagged)
+  // a3 : parameters pointer
+  // Registers used over whole function:
+  //  a5 : arguments count (tagged)
+  //  a6 : mapped parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame, eq, a0,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // No adaptor, parameter count = argument count.
+  __ mov(a5, a2);
+  __ Branch(USE_DELAY_SLOT, &try_allocate);
+  __ mov(a6, a2);  // In delay slot.
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiScale(t2, a5, kPointerSizeLog2);
+  __ Daddu(a4, a4, Operand(t2));
+  __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // a5 = argument count (tagged)
+  // a6 = parameter count (tagged)
+  // Compute the mapped parameter count = min(a6, a5) in a6.
+  __ mov(a6, a2);
+  __ Branch(&try_allocate, le, a6, Operand(a5));
+  __ mov(a6, a5);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  Label param_map_size;
+  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
+  __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
+  __ SmiScale(t1, a6, kPointerSizeLog2);
+  __ daddiu(t1, t1, kParameterMapHeaderSize);
+  __ bind(&param_map_size);
+
+  // 2. Backing store.
+  __ SmiScale(t2, a5, kPointerSizeLog2);
+  __ Daddu(t1, t1, Operand(t2));
+  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
+
+  // v0 = address of new object(s) (tagged)
+  // a2 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into a4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ ld(a4, NativeContextMemOperand());
+  Label skip2_ne, skip2_eq;
+  __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
+  __ ld(a4, MemOperand(a4, kNormalOffset));
+  __ bind(&skip2_ne);
+
+  __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
+  __ ld(a4, MemOperand(a4, kAliasedOffset));
+  __ bind(&skip2_eq);
+
+  // v0 = address of new object (tagged)
+  // a2 = argument count (smi-tagged)
+  // a4 = address of arguments map (tagged)
+  // a6 = mapped parameter count (tagged)
+  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(a1);
+  __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(a5);
+  __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, a4 will point there, otherwise
+  // it will point to the backing store.
+  __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
+  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // v0 = address of new object (tagged)
+  // a2 = argument count (tagged)
+  // a4 = address of parameter map or backing store (tagged)
+  // a6 = mapped parameter count (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  Label skip3;
+  __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+  // Move backing store address to a1, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(a1, a4);
+  __ bind(&skip3);
+
+  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
+
+  __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+  __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
+  __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
+  __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ SmiScale(t2, a6, kPointerSizeLog2);
+  __ Daddu(a5, a4, Operand(t2));
+  __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
+  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(a5, a6);
+  __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ Dsubu(t1, t1, Operand(a6));
+  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+  __ SmiScale(t2, a5, kPointerSizeLog2);
+  __ Daddu(a1, a4, Operand(t2));
+  __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
+
+  // a1 = address of backing store (tagged)
+  // a4 = address of parameter map (tagged)
+  // a0 = temporary scratch (among others, for address calculation)
+  // t1 = loop variable (tagged)
+  // a7 = the hole value
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
+  __ SmiScale(a0, a5, kPointerSizeLog2);
+  __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ Daddu(t2, a4, a0);
+  __ sd(t1, MemOperand(t2));
+  __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ Daddu(t2, a1, a0);
+  __ sd(a7, MemOperand(t2));
+  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+
+  // Restore a5 = argument count (tagged).
+  __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+  __ bind(&skip_parameter_map);
+  // v0 = address of new object (tagged)
+  // a1 = address of backing store (tagged)
+  // a5 = argument count (tagged)
+  // a6 = mapped parameter count (tagged)
+  // t1 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+  __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
+  __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ SmiScale(t2, a6, kPointerSizeLog2);
+  __ Dsubu(a3, a3, Operand(t2));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ Dsubu(a3, a3, Operand(kPointerSize));
+  __ ld(a4, MemOperand(a3, 0));
+  __ SmiScale(t2, a6, kPointerSizeLog2);
+  __ Daddu(t1, a1, Operand(t2));
+  __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
+  __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ Branch(&arguments_loop, lt, a6, Operand(a5));
+
+  // Return.
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // a5 = argument count (tagged)
+  __ bind(&runtime);
+  __ Push(a1, a3, a5);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
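The index arithmetic in the parameters loop is the fiddly part: map slot i
aliases context slot MIN_CONTEXT_SLOTS + parameter_count - 1 - i, and the stub
fills the map right to left while writing holes into the matching backing-store
slots. A standalone sketch of the same scheme (constants and names are
illustrative):

    #include <vector>

    // Returns, for each mapped parameter slot, the context slot it aliases.
    std::vector<int> BuildParameterMap(int parameter_count, int mapped_count,
                                       int min_context_slots) {
      std::vector<int> map(mapped_count);
      int context_index = min_context_slots + parameter_count - mapped_count;
      for (int i = mapped_count - 1; i >= 0; --i) {  // right to left, as above
        map[i] = context_index++;
        // The stub also stores the-hole into backing-store slot i here, so
        // loads fall through the map to the context instead of the store.
      }
      return map;
    }
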
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- ra : return address
+  // -----------------------------------
+  __ AssertFunction(a1);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make a2 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ Branch(USE_DELAY_SLOT, &loop_entry);
+    __ mov(a2, fp);  // In delay slot.
+    __ bind(&loop);
+    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+    __ Branch(&loop, ne, a1, Operand(a3));
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&arguments_adaptor, eq, a0,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  {
+    __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+  }
+  __ Branch(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    __ SmiLoadUntag(
+        a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ Dlsa(a2, a3, a0, kPointerSizeLog2);
+    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- a0 : number of rest parameters
+  //  -- a2 : pointer to first rest parameters
+  //  -- ra : return address
+  // -----------------------------------
+
+  // Allocate space for the rest parameter array plus the backing store.
+  Label allocate, done_allocate;
+  __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+  __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Compute arguments.length in a4.
+  __ SmiTag(a4, a0);
+
+  // Setup the elements array in v0.
+  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+  __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+  __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+  {
+    Label loop, done_loop;
+    __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+    __ bind(&loop);
+    __ Branch(&done_loop, eq, a1, Operand(a3));
+    __ ld(at, MemOperand(a2, 0 * kPointerSize));
+    __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+    __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+    __ Daddu(a3, a3, Operand(1 * kPointerSize));
+    __ Branch(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Setup the strict arguments object in a3.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+  __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+  __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a3);  // In delay slot
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(a0);
+    __ SmiTag(a1);
+    __ Push(a0, a2, a1);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(a0, a2);
+    __ SmiUntag(a0);
+  }
+  __ jmp(&done_allocate);
+}
+
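The copy loop above walks the stack downward (successive arguments sit at
decreasing addresses below the caller SP) while filling the FixedArray upward.
A plain-C++ equivalent of the pointer walk (no V8 types):

    #include <cstdint>

    // src points at the first (highest-addressed) argument slot; the stack
    // grows down, so later arguments sit at lower addresses.
    void CopyArguments(const intptr_t* src, intptr_t* dst, int count) {
      for (int i = 0; i < count; ++i) {
        dst[i] = *src;  // ld at, 0(a2); sd at, 0(a3)
        --src;          // Dsubu a2, a2, kPointerSize
      }                 // the ++i indexing stands in for Daddu a3, a3, 8
    }
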
+
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = cp;
   Register slot_reg = a2;
@@ -5183,8 +5369,7 @@
   }
 
   // Load the PropertyCell value at the specified slot.
-  __ dsll(at, slot_reg, kPointerSizeLog2);
-  __ Daddu(at, at, Operand(context_reg));
+  __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ ld(result_reg, ContextMemOperand(at, 0));
   __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
 
@@ -5222,8 +5407,7 @@
   }
 
   // Load the PropertyCell at the specified slot.
-  __ dsll(at, slot_reg, kPointerSizeLog2);
-  __ Daddu(at, at, Operand(context_reg));
+  __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
   __ ld(cell_reg, ContextMemOperand(at, 0));
 
   // Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5451,11 +5635,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- a0                  : callee
   //  -- a4                  : call_data
@@ -5491,8 +5674,10 @@
 
   // Save context, callee and call data.
   __ Push(context, callee, call_data);
-  // Load context from callee.
-  __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // Load context from callee.
+    __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
 
   Register scratch = call_data;
   if (!call_data_undefined) {
@@ -5577,7 +5762,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5585,41 +5770,49 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- sp[0]                  : name
-  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- sp[0]                        : name
+  //  -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- a2                     : api_function_address
+  //  -- a2                           : api_function_address
   // -----------------------------------
 
   Register api_function_address = ApiGetterDescriptor::function_address();
   DCHECK(api_function_address.is(a2));
 
-  __ mov(a0, sp);  // a0 = Handle<Name>
-  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+  __ mov(a0, sp);                               // a0 = Handle<Name>
+  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
 
   const int kApiStackSpace = 1;
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // a1 (internal::Object** args_) as the data.
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
   __ sd(a1, MemOperand(sp, 1 * kPointerSize));
-  __ Daddu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  __ Daddu(a1, sp, Operand(1 * kPointerSize));
+  // a1 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
+
+  // +3 is to skip prolog, return address and name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                            kStackUnwindSpace, kInvalidStackOffset,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           return_value_operand, NULL);
 }
 
 
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
index 022426e..c8cde97 100644
--- a/src/mips64/codegen-mips64.cc
+++ b/src/mips64/codegen-mips64.cc
@@ -1078,8 +1078,7 @@
   __ And(at, result, Operand(kStringEncodingMask));
   __ Branch(&one_byte, ne, at, Operand(zero_reg));
   // Two-byte string.
-  __ dsll(at, index, 1);
-  __ Daddu(at, string, at);
+  __ Dlsa(at, string, index, 1);
   __ lhu(result, MemOperand(at));
   __ jmp(&done);
   __ bind(&one_byte);
@@ -1151,8 +1150,7 @@
 
   // Must not call ExpConstant() after overwriting temp3!
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ dsll(at, temp2, 3);
-  __ Daddu(temp3, temp3, Operand(at));
+  __ Dlsa(temp3, temp3, temp2, 3);
   __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
   __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
  // The first word loaded goes into the lower-numbered register.
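
The Dlsa macro used in the rewrites above folds a shift-and-add pair into a
single MIPS64r6 DLSA instruction, falling back to the dsll+daddu sequence it
replaces on older variants. A minimal sketch of its arithmetic (not V8's
implementation):

// Dlsa(rd, rt, rs, sa) computes rd = rt + (rs << sa).
int64_t DlsaValue(int64_t rt, int64_t rs, int sa) {
  return rt + (rs << sa);
}
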
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
index 226e3ed..57e947b 100644
--- a/src/mips64/constants-mips64.h
+++ b/src/mips64/constants-mips64.h
@@ -45,27 +45,6 @@
 #error Unknown endianness
 #endif
 
-// TODO(plind): consider deriving ABI from compiler flags or build system.
-
-// ABI-dependent definitions are made with #define in simulator-mips64.h,
-// so the ABI choice must be available to the pre-processor. However, in all
-// other cases, we should use the enum AbiVariants with normal if statements.
-
-#define MIPS_ABI_N64 1
-// #define MIPS_ABI_O32 1
-
-// The only supported Abi's are O32, and n64.
-enum AbiVariants {
-  kO32,
-  kN64  // Use upper case N for 'n64' ABI to conform to style standard.
-};
-
-#ifdef MIPS_ABI_N64
-static const AbiVariants kMipsAbi = kN64;
-#else
-static const AbiVariants kMipsAbi = kO32;
-#endif
-
 
 // TODO(plind): consider renaming these ...
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -840,6 +819,7 @@
   kDontCheckForInexactConversion
 };
 
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
 
 // -----------------------------------------------------------------------------
 // Hints.
@@ -1184,7 +1164,7 @@
 // MIPS assembly various constants.
 
 // C/C++ argument slots size.
-const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
+const int kCArgSlotCount = 0;
 
 // TODO(plind): below should be based on kPointerSize
 // TODO(plind): find all usages and remove the needless instructions for n64.
@@ -1226,6 +1206,7 @@
     case SPECIAL3:
       switch (FunctionFieldRaw()) {
         case INS:
+        case DINS:
         case EXT:
         case DEXT:
         case DEXTM:
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
index 8daba04..ec610f0 100644
--- a/src/mips64/deoptimizer-mips64.cc
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -80,27 +80,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on MIPS in the input frame.
   return false;
 }
@@ -188,15 +166,9 @@
   __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   // a2: bailout id already loaded.
   // a3: code address or 0 already loaded.
-  if (kMipsAbi == kN64) {
-    // a4: already has fp-to-sp delta.
-    __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
-  } else {  // O32 abi.
-    // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
-    __ sd(a4, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
-    __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
-    __ sd(a5, CFunctionArgumentOperand(6));  // Isolate.
-  }
+  // a4: already has fp-to-sp delta.
+  __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+
   // Call Deoptimizer::New().
   {
     AllowExternalCallThatCantCauseGC scope(masm());
@@ -273,8 +245,7 @@
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
   __ ld(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
-  __ dsll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ daddu(a1, a4, a1);  // a1 = one past the last FrameDescription**.
+  __ Dlsa(a1, a4, a1, kPointerSizeLog2);
   __ BranchShort(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index c5c1311..73df66e 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -54,20 +54,6 @@
 const Register StringCompareDescriptor::RightRegister() { return a0; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a4; }
-
-
 const Register ApiGetterDescriptor::function_address() { return a2; }
 
 
@@ -96,6 +82,32 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1, a3};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {a1};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
 
 
@@ -165,13 +181,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a3, a0};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a3};
@@ -407,6 +416,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -430,7 +446,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 7b73ac7..b49fa76 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -161,9 +161,9 @@
                                 Condition cc,
                                 Label* branch) {
   DCHECK(cc == eq || cc == ne);
-  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
-  Branch(branch, cc, scratch,
-         Operand(ExternalReference::new_space_start(isolate())));
+  const int mask =
+      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+  CheckPageFlag(object, scratch, mask, cc, branch);
 }
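
The new-space test now reads the page header's from/to-space flag bits
instead of masking the object's address against new_space_start. The
predicate in plain C++, assuming the page's flag word is at hand
(illustrative only, not V8's implementation):

bool PageInNewSpace(uintptr_t page_flags) {
  const uintptr_t mask =
      1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
  return (page_flags & mask) != 0;  // cc == ne branches when in new space
}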
 
 
@@ -371,6 +371,67 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(a1));
+  DCHECK(code_entry.is(a4));
+  DCHECK(scratch.is(a5));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
+    ld(at, MemOperand(scratch));
+    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+           Operand(code_entry));
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  const Register dst = scratch;
+  Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+  // Save caller-saved registers. js_function and code_entry are in the
+  // caller-saved register list.
+  DCHECK(kJSCallerSaved & js_function.bit());
+  DCHECK(kJSCallerSaved & code_entry.bit());
+  MultiPush(kJSCallerSaved | ra.bit());
+
+  int argument_count = 3;
+
+  PrepareCallCFunction(argument_count, code_entry);
+
+  Move(a0, js_function);
+  Move(a1, dst);
+  li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  MultiPop(kJSCallerSaved | ra.bit());
+
+  bind(&done);
+}
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address,
@@ -503,16 +564,14 @@
   // hash = ~hash + (hash << 15);
   // The algorithm uses 32-bit integer values.
   nor(scratch, reg0, zero_reg);
-  sll(at, reg0, 15);
-  addu(reg0, scratch, at);
+  Lsa(reg0, scratch, reg0, 15);
 
   // hash = hash ^ (hash >> 12);
   srl(at, reg0, 12);
   xor_(reg0, reg0, at);
 
   // hash = hash + (hash << 2);
-  sll(at, reg0, 2);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 2);
 
   // hash = hash ^ (hash >> 4);
   srl(at, reg0, 4);
@@ -520,8 +579,7 @@
 
   // hash = hash * 2057;
   sll(scratch, reg0, 11);
-  sll(at, reg0, 3);
-  addu(reg0, reg0, at);
+  Lsa(reg0, reg0, reg0, 3);
   addu(reg0, reg0, scratch);
 
   // hash = hash ^ (hash >> 16);
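
The Lsa substitutions in this hash routine are pure instruction-count
savings; the computed value is unchanged. Written out in C++, the hash the
comments describe is:

uint32_t ComputeIntegerHash(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // == hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}
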
@@ -581,12 +639,10 @@
 
     // Scale the index by multiplying by the element size.
     DCHECK(SeededNumberDictionary::kEntrySize == 3);
-    dsll(at, reg2, 1);  // 2x.
-    daddu(reg2, reg2, at);  // reg2 = reg2 * 3.
+    Dlsa(reg2, reg2, reg2, 1);  // reg2 = reg2 * 3.
 
     // Check if the key is identical to the name.
-    dsll(at, reg2, kPointerSizeLog2);
-    daddu(reg2, elements, at);
+    Dlsa(reg2, elements, reg2, kPointerSizeLog2);
 
     ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
     if (i != kNumberDictionaryProbes - 1) {
@@ -1302,6 +1358,35 @@
   }
 }
 
+static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
+  if ((imm >> (bitnum - 1)) & 0x1) {
+    imm = (imm >> bitnum) + 1;
+  } else {
+    imm = imm >> bitnum;
+  }
+  return imm;
+}
+
+bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
+  bool higher_bits_sign_extended = false;
+  if (is_int16(j.imm64_)) {
+    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+  } else if (!(j.imm64_ & kHiMask)) {
+    ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
+  } else if (!(j.imm64_ & kImm16Mask)) {
+    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+      higher_bits_sign_extended = true;
+    }
+  } else {
+    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+    ori(rd, rd, (j.imm64_ & kImm16Mask));
+    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+      higher_bits_sign_extended = true;
+    }
+  }
+  return higher_bits_sign_extended;
+}
 
 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
   DCHECK(!j.is_reg());
@@ -1309,50 +1394,57 @@
   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int32(j.imm64_)) {
-      if (is_int16(j.imm64_)) {
-        daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
-      } else if (!(j.imm64_ & kHiMask)) {
-        ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
-      } else if (!(j.imm64_ & kImm16Mask)) {
-        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
-      } else {
-        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
-        ori(rd, rd, (j.imm64_ & kImm16Mask));
-      }
+      LiLower32BitHelper(rd, j);
     } else {
-      if (is_int48(j.imm64_)) {
-        if ((j.imm64_ >> 32) & kImm16Mask) {
-          lui(rd, (j.imm64_ >> 32) & kImm16Mask);
-          if ((j.imm64_ >> 16) & kImm16Mask) {
-            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-          }
-        } else {
-          ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+      if (kArchVariant == kMips64r6) {
+        int64_t imm = j.imm64_;
+        bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
+        imm = ShiftAndFixSignExtension(imm, 32);
+        // If LUI writes 1s to higher bits, we need both DAHI/DATI.
+        if ((imm & kImm16Mask) ||
+            (higher_bits_sign_extended && (j.imm64_ > 0))) {
+          dahi(rd, imm & kImm16Mask);
         }
-        dsll(rd, rd, 16);
-        if (j.imm64_ & kImm16Mask) {
-          ori(rd, rd, j.imm64_ & kImm16Mask);
+        imm = ShiftAndFixSignExtension(imm, 16);
+        if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
+            (higher_bits_sign_extended && (j.imm64_ > 0))) {
+          dati(rd, imm & kImm16Mask);
         }
       } else {
-        lui(rd, (j.imm64_ >> 48) & kImm16Mask);
-        if ((j.imm64_ >> 32) & kImm16Mask) {
-          ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
-        }
-        if ((j.imm64_ >> 16) & kImm16Mask) {
-          dsll(rd, rd, 16);
-          ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-          if (j.imm64_ & kImm16Mask) {
-            dsll(rd, rd, 16);
-            ori(rd, rd, j.imm64_ & kImm16Mask);
+        if (is_int48(j.imm64_)) {
+          if ((j.imm64_ >> 32) & kImm16Mask) {
+            lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+            if ((j.imm64_ >> 16) & kImm16Mask) {
+              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+            }
           } else {
-            dsll(rd, rd, 16);
+            ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+          }
+          dsll(rd, rd, 16);
+          if (j.imm64_ & kImm16Mask) {
+            ori(rd, rd, j.imm64_ & kImm16Mask);
           }
         } else {
-          if (j.imm64_ & kImm16Mask) {
-            dsll32(rd, rd, 0);
-            ori(rd, rd, j.imm64_ & kImm16Mask);
+          lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+          if ((j.imm64_ >> 32) & kImm16Mask) {
+            ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+          }
+          if ((j.imm64_ >> 16) & kImm16Mask) {
+            dsll(rd, rd, 16);
+            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+            if (j.imm64_ & kImm16Mask) {
+              dsll(rd, rd, 16);
+              ori(rd, rd, j.imm64_ & kImm16Mask);
+            } else {
+              dsll(rd, rd, 16);
+            }
           } else {
-            dsll32(rd, rd, 0);
+            if (j.imm64_ & kImm16Mask) {
+              dsll32(rd, rd, 0);
+              ori(rd, rd, j.imm64_ & kImm16Mask);
+            } else {
+              dsll32(rd, rd, 0);
+            }
           }
         }
       }
@@ -1371,12 +1463,32 @@
     dsll(rd, rd, 16);
     ori(rd, rd, j.imm64_ & kImm16Mask);
   } else {
-    lui(rd, (j.imm64_ >> 48) & kImm16Mask);
-    ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
-    dsll(rd, rd, 16);
-    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
-    dsll(rd, rd, 16);
-    ori(rd, rd, j.imm64_ & kImm16Mask);
+    if (kArchVariant == kMips64r6) {
+      int64_t imm = j.imm64_;
+      lui(rd, (imm >> kLuiShift) & kImm16Mask);
+      if (imm & kImm16Mask) {
+        ori(rd, rd, (imm & kImm16Mask));
+      }
+      if ((imm >> 31) & 0x1) {
+        imm = (imm >> 32) + 1;
+      } else {
+        imm = imm >> 32;
+      }
+      dahi(rd, imm & kImm16Mask);
+      if ((imm >> 15) & 0x1) {
+        imm = (imm >> 16) + 1;
+      } else {
+        imm = imm >> 16;
+      }
+      dati(rd, imm & kImm16Mask);
+    } else {
+      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+      dsll(rd, rd, 16);
+      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+      dsll(rd, rd, 16);
+      ori(rd, rd, j.imm64_ & kImm16Mask);
+    }
   }
 }
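
On r6 the lui/ori/dsll tail is replaced by DAHI/DATI, which add a 16-bit
immediate at bits 32 and 48 of the already-loaded lower word; since that
lower load is sign-extended, ShiftAndFixSignExtension bumps a chunk by one
when the bit below it is set, cancelling the smeared sign bits. A worked
example under those semantics (value chosen for illustration):

#include <cstdint>
// Loading 0x0000000180000000 takes lui+dahi; the DAHI chunk is 2, not 1,
// because lui left 0xFFFFFFFF in the upper word.
int64_t EmulateLuiDahi() {
  int64_t rd = static_cast<int32_t>(0x80000000u);  // lui rd, 0x8000
  rd += static_cast<int64_t>(2) << 32;             // dahi rd, 2
  return rd;  // == 0x0000000180000000
}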
 
@@ -1596,6 +1708,22 @@
   bind(&conversion_done);
 }
 
+void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+  // Move the data from fs to t8.
+  mfc1(t8, fs);
+  Cvt_s_uw(fd, t8);
+}
+
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+  // Convert rs to a FP value in fd.
+  DCHECK(!rs.is(t9));
+  DCHECK(!rs.is(at));
+
+  // Zero extend int32 in rs.
+  Dext(t9, rs, 0, 32);
+  dmtc1(t9, fd);
+  cvt_s_l(fd, fd);
+}
 
 void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
   // Move the data from fs to t8.
@@ -1672,6 +1800,12 @@
   mtc1(t8, fd);
 }
 
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+                                FPURegister scratch) {
+  Trunc_uw_s(fs, t8, scratch);
+  mtc1(t8, fd);
+}
+
 void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
                                 FPURegister scratch, Register result) {
   Trunc_ul_d(fs, t8, scratch, result);
@@ -1738,6 +1872,35 @@
   bind(&done);
 }
 
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+                                FPURegister scratch) {
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(at));
+
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x4F000000);
+  mtc1(at, scratch);
+  // Test if scratch > fd.
+  // If fd < 2^31 we can convert it normally.
+  Label simple_convert;
+  BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+  // First we subtract 2^31 from fd, then trunc it to rs
+  // and add 2^31 to rs.
+  sub_s(scratch, fd, scratch);
+  trunc_w_s(scratch, scratch);
+  mfc1(rs, scratch);
+  Or(rs, rs, 1 << 31);
+
+  Label done;
+  Branch(&done);
+  // Simple conversion.
+  bind(&simple_convert);
+  trunc_w_s(scratch, fd);
+  mfc1(rs, scratch);
+
+  bind(&done);
+}
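
The single-precision truncation mirrors the double-precision path above:
values below 2^31 convert directly, larger ones are biased down by 2^31
before truncation and the top bit is OR-ed back in. The same algorithm in
plain C++ (a sketch, not the emitted code):

#include <cstdint>
uint32_t TruncUwS(float x) {
  const float k2Pow31 = 2147483648.0f;  // the 0x4F000000 constant above
  if (x < k2Pow31) {
    return static_cast<uint32_t>(static_cast<int32_t>(x));  // simple convert
  }
  // Subtract 2^31, truncate, then restore the top bit.
  return static_cast<uint32_t>(static_cast<int32_t>(x - k2Pow31)) | (1u << 31);
}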
 
 void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
                                 FPURegister scratch, Register result) {
@@ -3714,7 +3877,7 @@
     return;
   }
 
-  DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+  DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3792,8 +3955,8 @@
   }
 
   // |object_size| and |result_end| may overlap, other registers must not.
-  DCHECK(!AreAliased(object_size, result, scratch, t9));
-  DCHECK(!AreAliased(result_end, result, scratch, t9));
+  DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+  DCHECK(!AreAliased(result_end, result, scratch, t9, at));
 
   // Check relative positions of allocation top and limit addresses.
   // ARM adds additional checks to make sure the ldm instruction can be
@@ -3839,8 +4002,7 @@
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
-    dsll(result_end, object_size, kPointerSizeLog2);
-    Daddu(result_end, result, result_end);
+    Dlsa(result_end, result, object_size, kPointerSizeLog2);
   } else {
     Daddu(result_end, result, Operand(object_size));
   }
@@ -4365,7 +4527,7 @@
 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
                                           DoubleRegister src2) {
   if (!IsMipsSoftFloatABI) {
-    const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+    const DoubleRegister fparg2 = f13;
     if (src2.is(f12)) {
       DCHECK(!src1.is(fparg2));
       Move(fparg2, src2);
@@ -4479,7 +4641,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -5230,18 +5392,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, a1);
-  InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
@@ -5338,9 +5488,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
@@ -5596,8 +5746,7 @@
     if (argument_count_is_length) {
       daddu(sp, sp, argument_count);
     } else {
-      dsll(t8, argument_count, kPointerSizeLog2);
-      daddu(sp, sp, t8);
+      Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
     }
   }
 
@@ -5880,6 +6029,17 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, t8);
+    Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
   if (emit_debug_code()) {
@@ -5969,8 +6129,7 @@
   Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
 }
 
-
-static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+static const int kRegisterPassedArguments = 8;
 
 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                               int num_double_arguments) {
@@ -6185,8 +6344,7 @@
   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
   Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
-  dsll(t8, t8, Bitmap::kBytesPerCellLog2);
-  Daddu(bitmap_reg, bitmap_reg, t8);
+  Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
   li(t8, Operand(1));
   dsllv(mask_reg, t8, mask_reg);
 }
@@ -6251,7 +6409,8 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Register null_value = a5;
   Register  empty_fixed_array_value = a6;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Label next, start;
@@ -6265,6 +6424,7 @@
   Branch(
       call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
 
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
   jmp(&start);
 
   bind(&next);
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 31ed8a3..7f44ab9 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -15,6 +15,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_v0};
 const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
 const Register kJSFunctionRegister = {Register::kCode_a1};
 const Register kContextRegister = {Register::kCpRegister};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -235,6 +236,11 @@
               Heap::RootListIndex index,
               BranchDelaySlot bdslot = PROTECT);
 
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with a 'Label* func(size_t index)' declaration.
+  template <typename Func>
+  void GenerateSwitchTable(Register index, size_t case_count,
+                           Func GetLabelFunction);
 #undef COND_ARGS
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -385,7 +391,7 @@
   void JumpIfNotInNewSpace(Register object,
                            Register scratch,
                            Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
+    InNewSpace(object, scratch, eq, branch);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
@@ -393,7 +399,7 @@
   void JumpIfInNewSpace(Register object,
                         Register scratch,
                         Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
+    InNewSpace(object, scratch, ne, branch);
   }
 
   // Check if an object has a given incremental marking color.
@@ -455,6 +461,11 @@
                      pointers_to_here_check_for_value);
   }
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(
       Register object,
       Register map,
@@ -688,6 +699,7 @@
 
   // Load int32 in the rd register.
   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+  inline bool LiLower32BitHelper(Register rd, Operand j);
   inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
     li(rd, Operand(j), mode);
   }
@@ -821,6 +833,10 @@
   void Cvt_d_ul(FPURegister fd, FPURegister fs);
   void Cvt_d_ul(FPURegister fd, Register rs);
 
+  // Convert unsigned word to float.
+  void Cvt_s_uw(FPURegister fd, FPURegister fs);
+  void Cvt_s_uw(FPURegister fd, Register rs);
+
   // Convert unsigned long to float.
   void Cvt_s_ul(FPURegister fd, FPURegister fs);
   void Cvt_s_ul(FPURegister fd, Register rs);
@@ -837,6 +853,10 @@
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
+  // Convert single to unsigned word.
+  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
   // Convert double to unsigned long.
   void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
                   Register result = no_reg);
@@ -1121,6 +1141,11 @@
                      Register map,
                      Register type_reg);
 
+  void GetInstanceType(Register object_map, Register object_instance_type) {
+    lbu(object_instance_type,
+        FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+  }
+
   // Check if a map for a JSObject indicates that the object has fast elements.
   // Jump to the specified label if it does not.
   void CheckFastElements(Register map,
@@ -1449,10 +1474,6 @@
   void JumpToExternalReference(const ExternalReference& builtin,
                                BranchDelaySlot bd = PROTECT);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   struct Unresolved {
     int pc;
     uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1644,6 +1665,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1747,7 +1771,7 @@
 
   // Expects object in a0 and returns map with validated enum cache
   // in a0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
+  void CheckEnumCache(Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
   // AllocationMemento object that can be checked for in order to pretransition
@@ -1836,9 +1860,8 @@
                            Register scratch2);
 
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise.
+  void InNewSpace(Register object, Register scratch,
+                  Condition cond,  // ne for new space, eq otherwise.
                   Label* branch);
 
   // Helper for finding the mark bits for an address.  Afterwards, the
@@ -1901,7 +1924,36 @@
   FlushICache flush_cache_;  // Whether to flush the I cache after patching.
 };
 
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+                                         Func GetLabelFunction) {
+  // Ensure that dd-ed labels following this instruction use 8-byte-aligned
+  // addresses.
+  if (kArchVariant >= kMips64r6) {
+    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
+    // Opposite of Align(8) as we have odd number of instructions in this case.
+    if ((pc_offset() & 7) == 0) {
+      nop();
+    }
+    addiupc(at, 5);
+    dlsa(at, at, index, kPointerSizeLog2);
+    ld(at, MemOperand(at));
+  } else {
+    Label here;
+    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
+    Align(8);
+    bal(&here);
+    dsll(at, index, kPointerSizeLog2);  // Branch delay slot.
+    bind(&here);
+    daddu(at, at, ra);
+    ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+  }
+  jr(at);
+  nop();  // Branch delay slot nop.
+  for (size_t i = 0; i < case_count; ++i) {
+    dd(GetLabelFunction(i));
+  }
+}
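
A hypothetical call site, showing the shape GetLabelFunction takes (register
and labels invented for illustration):

// Dispatch on the zero-based value in a0 to one of three cases.
Label case0, case1, case2;
Label* labels[] = {&case0, &case1, &case2};
__ GenerateSwitchTable(a0, arraysize(labels),
                       [&labels](size_t i) { return labels[i]; });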
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 7fa9644..70c06c8 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -16,6 +16,7 @@
 #include "src/mips64/constants-mips64.h"
 #include "src/mips64/simulator-mips64.h"
 #include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
 
 // Only build the simulator if not compiling for real MIPS hardware.
 #if defined(USE_SIMULATOR)
@@ -520,7 +521,8 @@
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           int64_t value = *cur;
           Heap* current_heap = sim_->isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & 1) == 0) {
               PrintF("smi %d", static_cast<int>(value >> 32));
@@ -1159,7 +1161,7 @@
 // from a0-a3 or f12 and f13 (n64), or f14 (O32).
 void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
   if (!IsMipsSoftFloatABI) {
-    const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
+    const int fparg2 = 13;
     *x = get_fpu_register_double(12);
     *y = get_fpu_register_double(fparg2);
     *z = static_cast<int32_t>(get_register(a2));
@@ -1964,11 +1966,6 @@
 // 64 bits of result. If they don't, the v1 result register contains a bogus
 // value, which is fine because it is caller-saved.
 
-struct ObjectPair {
-  Object* x;
-  Object* y;
-};
-
 typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
                                         int64_t arg1,
                                         int64_t arg2,
@@ -1976,6 +1973,9 @@
                                         int64_t arg4,
                                         int64_t arg5);
 
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
+                                                   int64_t arg2, int64_t arg3,
+                                                   int64_t arg4);
 
 // These prototypes handle the four types of FP calls.
 typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -2010,15 +2010,9 @@
     int64_t arg3 = get_register(a3);
     int64_t arg4, arg5;
 
-    if (kMipsAbi == kN64) {
-      arg4 = get_register(a4);  // Abi n64 register a4.
-      arg5 = get_register(a5);  // Abi n64 register a5.
-    } else {  // Abi O32.
-      int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
-      // Args 4 and 5 are on the stack after the reserved space for args 0..3.
-      arg4 = stack_pointer[4];
-      arg5 = stack_pointer[5];
-    }
+    arg4 = get_register(a4);  // Abi n64 register a4.
+    arg5 = get_register(a5);  // Abi n64 register a5.
+
     bool fp_call =
          (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
          (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2175,7 +2169,30 @@
       SimulatorRuntimeProfilingGetterCall target =
           reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
       target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+    } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+      // builtin call returning ObjectTriple.
+      SimulatorRuntimeTripleCall target =
+          reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF(
+            "Call to host triple returning runtime function %p "
+            "args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
+            ", %016" PRIx64 "\n",
+            FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+      }
+      // arg0 is a hidden argument pointing to the return location, so don't
+      // pass it to the target function.
+      ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+      }
+      // The result is passed back via the address in the hidden first argument.
+      ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+      *sim_result = result;
+      set_register(v0, arg0);
     } else {
+      DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+             redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
       SimulatorRuntimeCall target =
                   reinterpret_cast<SimulatorRuntimeCall>(external);
       if (::v8::internal::FLAG_trace_sim) {
@@ -2316,6 +2333,89 @@
            static_cast<int>(e));
 }
 
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+  return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+  return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+  if (std::isnan(a) && std::isnan(b)) {
+    result = a;
+  } else if (std::isnan(a)) {
+    result = b;
+  } else if (std::isnan(b)) {
+    result = a;
+  } else if (b == a) {
+    // Handle -0.0 == 0.0 case.
+    // std::signbit() returns int 0 or 1, so subtracting MaxMinKind::kMax
+    // negates the result.
+    result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+  } else {
+    return false;
+  }
+  return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+  T result;
+  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+    return result;
+  } else {
+    return b < a ? b : a;
+  }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+  T result;
+  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+    return result;
+  } else {
+    return b > a ? b : a;
+  }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+  T result;
+  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+    if (FPAbs(a) < FPAbs(b)) {
+      result = a;
+    } else if (FPAbs(b) < FPAbs(a)) {
+      result = b;
+    } else {
+      result = a < b ? a : b;
+    }
+  }
+  return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+  T result;
+  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+    if (FPAbs(a) > FPAbs(b)) {
+      result = a;
+    } else if (FPAbs(b) > FPAbs(a)) {
+      result = b;
+    } else {
+      result = a > b ? a : b;
+    }
+  }
+  return result;
+}
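
These templates centralize the r6 MIN/MAX rules that the open-coded switch
cases below previously duplicated: one NaN operand yields the other operand,
two NaNs yield NaN, and a +/-0.0 tie is broken by the sign bit. Expected
behaviour, as illustrative assertions (assuming <cassert> and <cmath>):

assert(std::signbit(FPUMin(-0.0, 0.0)));  // kMin picks -0.0 on a zero tie
assert(FPUMax(NAN, 2.0f) == 2.0f);        // a lone NaN yields the other value
assert(FPUMinA(-3.0, 2.0) == 2.0);        // MINA: smaller magnitude wins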
 
 // Handle execution based on instruction types.
 
@@ -2600,71 +2700,19 @@
     }
     case MINA:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        float result;
-        if (fabs(fs) > fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) < fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs < ft ? fs : ft);
-        }
-        set_fpu_register_float(fd_reg(), result);
-      }
+      set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        float result;
-        if (fabs(fs) < fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) > fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs > ft ? fs : ft);
-        }
-        set_fpu_register_float(fd_reg(), result);
-      }
+      set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
       break;
     case MIN:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
-      }
+      set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_float(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_float(fd_reg(), fs);
-      } else {
-        set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
-      }
+      set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
       break;
     case SEL:
       DCHECK(kArchVariant == kMips64r6);
@@ -2809,71 +2857,19 @@
     }
     case MINA:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        double result;
-        if (fabs(fs) > fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) < fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs < ft ? fs : ft);
-        }
-        set_fpu_register_double(fd_reg(), result);
-      }
+      set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        double result;
-        if (fabs(fs) < fabs(ft)) {
-          result = ft;
-        } else if (fabs(fs) > fabs(ft)) {
-          result = fs;
-        } else {
-          result = (fs > ft ? fs : ft);
-        }
-        set_fpu_register_double(fd_reg(), result);
-      }
+      set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
       break;
     case MIN:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
-      }
+      set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(kArchVariant == kMips64r6);
-      fs = get_fpu_register_double(fs_reg());
-      if (std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else if (std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), ft);
-      } else if (!std::isnan(fs) && std::isnan(ft)) {
-        set_fpu_register_double(fd_reg(), fs);
-      } else {
-        set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
-      }
+      set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
       break;
     case ADD_D:
       set_fpu_register_double(fd_reg(), fs + ft);
@@ -4777,7 +4773,7 @@
 
 
 int64_t Simulator::Call(byte* entry, int argument_count, ...) {
-  const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+  const int kRegisterPassedArguments = 8;
   va_list parameters;
   va_start(parameters, argument_count);
   // Set up arguments.
@@ -4789,14 +4785,12 @@
   set_register(a2, va_arg(parameters, int64_t));
   set_register(a3, va_arg(parameters, int64_t));
 
-  if (kMipsAbi == kN64) {
-    // Up to eight arguments passed in registers in N64 ABI.
-    // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
-    if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
-    if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
-    if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
-    if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
-  }
+  // Up to eight arguments are passed in registers in the N64 ABI.
+  // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
+  if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
+  if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
+  if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
+  if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
 
   // Remaining arguments passed on stack.
   int64_t original_stack = get_register(sp);
@@ -4831,7 +4825,7 @@
 
 double Simulator::CallFP(byte* entry, double d0, double d1) {
   if (!IsMipsSoftFloatABI) {
-    const FPURegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+    const FPURegister fparg2 = f13;
     set_fpu_register_double(f12, d0);
     set_fpu_register_double(fparg2, d1);
   } else {
diff --git a/src/mips64/simulator-mips64.h b/src/mips64/simulator-mips64.h
index 1d156d8..7f60a74 100644
--- a/src/mips64/simulator-mips64.h
+++ b/src/mips64/simulator-mips64.h
@@ -31,7 +31,6 @@
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth (or ninth) argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
-#ifdef MIPS_ABI_N64
 typedef int (*mips_regexp_matcher)(String* input,
                                    int64_t start_offset,
                                    const byte* input_start,
@@ -48,26 +47,6 @@
   (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7,   \
                                              NULL, p8))
 
-#else  // O32 Abi.
-
-typedef int (*mips_regexp_matcher)(String* input,
-                                   int32_t start_offset,
-                                   const byte* input_start,
-                                   const byte* input_end,
-                                   void* return_address,
-                                   int* output,
-                                   int32_t output_size,
-                                   Address stack_base,
-                                   int32_t direct_call,
-                                   Isolate* isolate);
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
-                                   p7, p8)                                     \
-  (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
-                                             p7, p8))
-
-#endif  // MIPS_ABI_N64
-
 
 // The stack limit beyond which we will throw stack overflow errors in
 // generated code. Because generated code on mips uses the C stack, we
@@ -516,18 +495,11 @@
       reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
 
 
-#ifdef MIPS_ABI_N64
 #define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
                                    p7, p8)                                     \
   static_cast<int>(Simulator::current(isolate)->Call(                          \
       entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7,   \
       NULL, p8))
-#else  // Must be O32 Abi.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
-                                   p7, p8)                                     \
-  static_cast<int>(Simulator::current(isolate)->Call(                          \
-      entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#endif  // MIPS_ABI_N64
 
 
 // The simulator has its own stack. Thus it has a different stack limit from
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index ba3c4be..cd4be13 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -193,19 +193,24 @@
 class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
  public:
   static bool IsValidSlot(HeapObject* obj, int offset) {
-    return offset == kConstantPoolOffset;
+    return offset >= kConstantPoolOffset &&
+           offset <= kSourcePositionTableOffset;
   }
 
   template <typename ObjectVisitor>
   static inline void IterateBody(HeapObject* obj, int object_size,
                                  ObjectVisitor* v) {
     IteratePointer(obj, kConstantPoolOffset, v);
+    IteratePointer(obj, kHandlerTableOffset, v);
+    IteratePointer(obj, kSourcePositionTableOffset, v);
   }
 
   template <typename StaticVisitor>
   static inline void IterateBody(HeapObject* obj, int object_size) {
     Heap* heap = obj->GetHeap();
     IteratePointer<StaticVisitor>(heap, obj, kConstantPoolOffset);
+    IteratePointer<StaticVisitor>(heap, obj, kHandlerTableOffset);
+    IteratePointer<StaticVisitor>(heap, obj, kSourcePositionTableOffset);
   }
 
   static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -464,7 +469,6 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
-    case JS_ITERATOR_RESULT_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b6dd425..0d01ec2 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -7,6 +7,7 @@
 #include "src/bootstrapper.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
+#include "src/field-type.h"
 #include "src/macro-assembler.h"
 #include "src/ostreams.h"
 #include "src/regexp/jsregexp.h"
@@ -150,9 +151,6 @@
     case JS_MAP_ITERATOR_TYPE:
       JSMapIterator::cast(this)->JSMapIteratorVerify();
       break;
-    case JS_ITERATOR_RESULT_TYPE:
-      JSIteratorResult::cast(this)->JSIteratorResultVerify();
-      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
@@ -210,7 +208,7 @@
 void Symbol::SymbolVerify() {
   CHECK(IsSymbol());
   CHECK(HasHashCode());
-  CHECK_GT(Hash(), 0u);
+  CHECK(GetHeap()->hidden_properties_symbol() == this || Hash() > 0u);
   CHECK(name()->IsUndefined() || name()->IsString());
 }
 
@@ -298,9 +296,9 @@
         if (value->IsUninitialized()) continue;
         if (r.IsSmi()) DCHECK(value->IsSmi());
         if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
-        HeapType* field_type = descriptors->GetFieldType(i);
-        bool type_is_none = field_type->Is(HeapType::None());
-        bool type_is_any = HeapType::Any()->Is(field_type);
+        FieldType* field_type = descriptors->GetFieldType(i);
+        bool type_is_none = field_type->IsNone();
+        bool type_is_any = field_type->IsAny();
         if (r.IsNone()) {
           CHECK(type_is_none);
         } else if (!type_is_any && !(type_is_none && r.IsHeapObject())) {
@@ -318,7 +316,8 @@
   // pointer may point to a one pointer filler map.
   if (ElementsAreSafeToExamine()) {
     CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
-              (elements() == GetHeap()->empty_fixed_array())),
+              (elements() == GetHeap()->empty_fixed_array()) ||
+              HasFastStringWrapperElements()),
              (elements()->map() == GetHeap()->fixed_array_map() ||
               elements()->map() == GetHeap()->fixed_cow_array_map()));
     CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
@@ -553,9 +552,7 @@
   VerifyObjectField(kBoundThisOffset);
   VerifyObjectField(kBoundTargetFunctionOffset);
   VerifyObjectField(kBoundArgumentsOffset);
-  VerifyObjectField(kCreationContextOffset);
   CHECK(bound_target_function()->IsCallable());
-  CHECK(creation_context()->IsNativeContext());
   CHECK(IsCallable());
   CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
 }
@@ -765,14 +762,6 @@
 }
 
 
-void JSIteratorResult::JSIteratorResultVerify() {
-  CHECK(IsJSIteratorResult());
-  JSObjectVerify();
-  VerifyPointer(done());
-  VerifyPointer(value());
-}
-
-
 void JSWeakMap::JSWeakMapVerify() {
   CHECK(IsJSWeakMap());
   JSObjectVerify();
@@ -911,12 +900,6 @@
 }
 
 
-void AccessorInfo::AccessorInfoVerify() {
-  VerifyPointer(name());
-  VerifyPointer(expected_receiver_type());
-}
-
-
 void SloppyBlockWithEvalContextExtension::
     SloppyBlockWithEvalContextExtensionVerify() {
   CHECK(IsSloppyBlockWithEvalContextExtension());
@@ -925,9 +908,10 @@
 }
 
 
-void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
-  CHECK(IsExecutableAccessorInfo());
-  AccessorInfoVerify();
+void AccessorInfo::AccessorInfoVerify() {
+  CHECK(IsAccessorInfo());
+  VerifyPointer(name());
+  VerifyPointer(expected_receiver_type());
   VerifyPointer(getter());
   VerifyPointer(setter());
   VerifyPointer(data());
@@ -1038,7 +1022,7 @@
 void DebugInfo::DebugInfoVerify() {
   CHECK(IsDebugInfo());
   VerifyPointer(shared());
-  VerifyPointer(code());
+  VerifyPointer(abstract_code());
   VerifyPointer(break_points());
 }
 
@@ -1076,7 +1060,8 @@
     case FAST_HOLEY_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
-    case FAST_ELEMENTS: {
+    case FAST_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS: {
       info->number_of_objects_with_fast_elements_++;
       int holes = 0;
       FixedArray* e = FixedArray::cast(elements());
@@ -1100,7 +1085,8 @@
       info->number_of_fast_used_elements_ += e->length();
       break;
     }
-    case DICTIONARY_ELEMENTS: {
+    case DICTIONARY_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS: {
       SeededNumberDictionary* dict = element_dictionary();
       info->number_of_slow_used_elements_ += dict->NumberOfElements();
       info->number_of_slow_unused_elements_ +=
@@ -1109,6 +1095,7 @@
     }
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+    case NO_ELEMENTS:
       break;
   }
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0509a80..e00478a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -18,6 +18,7 @@
 #include "src/conversions-inl.h"
 #include "src/factory.h"
 #include "src/field-index-inl.h"
+#include "src/handles-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
 #include "src/isolate.h"
@@ -28,7 +29,6 @@
 #include "src/prototype.h"
 #include "src/transitions-inl.h"
 #include "src/type-feedback-vector-inl.h"
-#include "src/types-inl.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -54,14 +54,11 @@
   return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
 }
 
-
-#define TYPE_CHECKER(type, instancetype)                                \
-  bool Object::Is##type() const {                                       \
-  return Object::IsHeapObject() &&                                      \
-      HeapObject::cast(this)->map()->instance_type() == instancetype;   \
+#define TYPE_CHECKER(type, instancetype)           \
+  bool HeapObject::Is##type() const {              \
+    return map()->instance_type() == instancetype; \
   }
 
-
 #define CAST_ACCESSOR(type)                       \
   type* type::cast(Object* object) {              \
     SLOW_DCHECK(object->Is##type());              \
@@ -128,143 +125,118 @@
     set_##field(BooleanBit::set(field(), offset, value));  \
   }
 
-
-bool Object::IsFixedArrayBase() const {
+bool HeapObject::IsFixedArrayBase() const {
   return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
 }
 
-
-bool Object::IsFixedArray() const {
-  if (!IsHeapObject()) return false;
-  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFixedArray() const {
+  InstanceType instance_type = map()->instance_type();
   return instance_type == FIXED_ARRAY_TYPE ||
          instance_type == TRANSITION_ARRAY_TYPE;
 }
 
 
 // External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() const {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->external_map();
+bool HeapObject::IsExternal() const {
+  return map() == GetHeap()->external_map();
 }
 
 
-bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
-
-
 TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
 TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
 TYPE_CHECKER(Symbol, SYMBOL_TYPE)
 TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
 
-
 #define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
-  bool Object::Is##Type() const {                                     \
-    return Object::IsHeapObject() &&                                  \
-           HeapObject::cast(this)->map() ==                           \
-               HeapObject::cast(this)->GetHeap()->type##_map();       \
-  }
+  bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
 SIMD128_TYPES(SIMD128_TYPE_CHECKER)
 #undef SIMD128_TYPE_CHECKER
 
+#define IS_TYPE_FUNCTION_DEF(type_)                               \
+  bool Object::Is##type_() const {                                \
+    return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
+  }
+HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
 
-bool Object::IsString() const {
-  return Object::IsHeapObject()
-    && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
+bool HeapObject::IsString() const {
+  return map()->instance_type() < FIRST_NONSTRING_TYPE;
 }
 
-
-bool Object::IsName() const {
-  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  return Object::IsHeapObject() &&
-         HeapObject::cast(this)->map()->instance_type() <= LAST_NAME_TYPE;
+bool HeapObject::IsName() const {
+  return map()->instance_type() <= LAST_NAME_TYPE;
 }
 
-
-bool Object::IsUniqueName() const {
+bool HeapObject::IsUniqueName() const {
   return IsInternalizedString() || IsSymbol();
 }
 
+bool Name::IsUniqueName() const {
+  uint32_t type = map()->instance_type();
+  return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
+         (kStringTag | kNotInternalizedTag);
+}
 
-bool Object::IsFunction() const {
+bool HeapObject::IsFunction() const {
   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
-  return Object::IsHeapObject() &&
-         HeapObject::cast(this)->map()->instance_type() >= FIRST_FUNCTION_TYPE;
+  return map()->instance_type() >= FIRST_FUNCTION_TYPE;
 }
 
+bool HeapObject::IsCallable() const { return map()->is_callable(); }
 
-bool Object::IsCallable() const {
-  return Object::IsHeapObject() && HeapObject::cast(this)->map()->is_callable();
-}
+bool HeapObject::IsConstructor() const { return map()->is_constructor(); }
 
-
-bool Object::IsConstructor() const {
-  return Object::IsHeapObject() &&
-         HeapObject::cast(this)->map()->is_constructor();
-}
-
-
-bool Object::IsTemplateInfo() const {
+bool HeapObject::IsTemplateInfo() const {
   return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
 }
 
-
-bool Object::IsInternalizedString() const {
-  if (!this->IsHeapObject()) return false;
-  uint32_t type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsInternalizedString() const {
+  uint32_t type = map()->instance_type();
   STATIC_ASSERT(kNotInternalizedTag != 0);
   return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
       (kStringTag | kInternalizedTag);
 }
 
-
-bool Object::IsConsString() const {
+bool HeapObject::IsConsString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsCons();
 }
 
-
-bool Object::IsSlicedString() const {
+bool HeapObject::IsSlicedString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSliced();
 }
 
-
-bool Object::IsSeqString() const {
+bool HeapObject::IsSeqString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential();
 }
 
-
-bool Object::IsSeqOneByteString() const {
+bool HeapObject::IsSeqOneByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential() &&
          String::cast(this)->IsOneByteRepresentation();
 }
 
-
-bool Object::IsSeqTwoByteString() const {
+bool HeapObject::IsSeqTwoByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential() &&
          String::cast(this)->IsTwoByteRepresentation();
 }
 
-
-bool Object::IsExternalString() const {
+bool HeapObject::IsExternalString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal();
 }
 
-
-bool Object::IsExternalOneByteString() const {
+bool HeapObject::IsExternalOneByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal() &&
          String::cast(this)->IsOneByteRepresentation();
 }
 
-
-bool Object::IsExternalTwoByteString() const {
+bool HeapObject::IsExternalTwoByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal() &&
          String::cast(this)->IsTwoByteRepresentation();
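
The rewrites above all follow one pattern: the tag check stays on Object,
the map check moves to HeapObject, and the IS_TYPE_FUNCTION_DEF macro
stitches the two together. A minimal standalone sketch of that two-level
dispatch (the type names and tagging scheme here are illustrative, not
V8's):

    #include <cassert>
    #include <cstdint>

    enum InstanceType { SYMBOL_TYPE, HEAP_NUMBER_TYPE, OTHER_TYPE };

    struct Map { InstanceType instance_type; };

    struct HeapObject {
      Map* map;
      // Map-based check: only valid once the value is known to be on the heap.
      bool IsSymbol() const { return map->instance_type == SYMBOL_TYPE; }
    };

    struct TaggedValue {
      std::uintptr_t bits;  // low bit set = heap pointer, clear = small integer
      bool IsHeapObject() const { return (bits & 1) != 0; }
      HeapObject* ToHeapObject() const {
        assert(IsHeapObject());
        return reinterpret_cast<HeapObject*>(bits & ~std::uintptr_t{1});
      }
      // Object-level check: tag test first, then delegate to the map test.
      bool IsSymbol() const {
        return IsHeapObject() && ToHeapObject()->IsSymbol();
      }
    };

Callers that already hold a HeapObject* skip the redundant tag check,
which is the point of pushing the predicates down.
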
@@ -664,10 +636,8 @@
 TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
 TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
 
-
-bool Object::IsFiller() const {
-  if (!Object::IsHeapObject()) return false;
-  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFiller() const {
+  InstanceType instance_type = map()->instance_type();
   return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
 }
 
@@ -679,41 +649,28 @@
 TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
 #undef TYPED_ARRAY_TYPE_CHECKER
 
-
-bool Object::IsFixedTypedArrayBase() const {
-  if (!Object::IsHeapObject()) return false;
-
-  InstanceType instance_type =
-      HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFixedTypedArrayBase() const {
+  InstanceType instance_type = map()->instance_type();
   return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
           instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
 }
 
-
-bool Object::IsJSReceiver() const {
+bool HeapObject::IsJSReceiver() const {
   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-  return IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+  return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
-
-bool Object::IsJSObject() const {
+bool HeapObject::IsJSObject() const {
   STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
-  return IsHeapObject() && HeapObject::cast(this)->map()->IsJSObjectMap();
+  return map()->IsJSObjectMap();
 }
 
-
-bool Object::IsJSProxy() const {
-  if (!Object::IsHeapObject()) return false;
-  return  HeapObject::cast(this)->map()->IsJSProxyMap();
-}
-
+bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
 
 TYPE_CHECKER(JSSet, JS_SET_TYPE)
 TYPE_CHECKER(JSMap, JS_MAP_TYPE)
 TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
 TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
-TYPE_CHECKER(JSIteratorResult, JS_ITERATOR_RESULT_TYPE)
 TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
 TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
 TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
@@ -722,35 +679,25 @@
 TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
 TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
 
-
-bool Object::IsJSWeakCollection() const {
+bool HeapObject::IsJSWeakCollection() const {
   return IsJSWeakMap() || IsJSWeakSet();
 }
 
+bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
 
-bool Object::IsDescriptorArray() const {
-  return IsFixedArray();
-}
-
-
-bool Object::IsArrayList() const { return IsFixedArray(); }
-
+bool HeapObject::IsArrayList() const { return IsFixedArray(); }
 
 bool Object::IsLayoutDescriptor() const {
   return IsSmi() || IsFixedTypedArrayBase();
 }
 
+bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }
 
-bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
 
+bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
 
-bool Object::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
-
-
-bool Object::IsLiteralsArray() const { return IsFixedArray(); }
-
-
-bool Object::IsDeoptimizationInputData() const {
+bool HeapObject::IsDeoptimizationInputData() const {
   // Must be a fixed array.
   if (!IsFixedArray()) return false;
 
@@ -765,8 +712,7 @@
   return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
 }
 
-
-bool Object::IsDeoptimizationOutputData() const {
+bool HeapObject::IsDeoptimizationOutputData() const {
   if (!IsFixedArray()) return false;
   // There's actually no way to see the difference between a fixed array and
   // a deoptimization data array.  Since this is used for asserts we can check
@@ -775,27 +721,23 @@
   return true;
 }
 
-
-bool Object::IsHandlerTable() const {
+bool HeapObject::IsHandlerTable() const {
   if (!IsFixedArray()) return false;
   // There's actually no way to see the difference between a fixed array and
   // a handler table array.
   return true;
 }
 
-
-bool Object::IsDependentCode() const {
+bool HeapObject::IsDependentCode() const {
   if (!IsFixedArray()) return false;
   // There's actually no way to see the difference between a fixed array and
   // a dependent codes array.
   return true;
 }
 
-
-bool Object::IsContext() const {
-  if (!Object::IsHeapObject()) return false;
-  Map* map = HeapObject::cast(this)->map();
-  Heap* heap = map->GetHeap();
+bool HeapObject::IsContext() const {
+  Map* map = this->map();
+  Heap* heap = GetHeap();
   return (map == heap->function_context_map() ||
       map == heap->catch_context_map() ||
       map == heap->with_context_map() ||
@@ -805,26 +747,16 @@
       map == heap->script_context_map());
 }
 
-
-bool Object::IsNativeContext() const {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->native_context_map();
+bool HeapObject::IsNativeContext() const {
+  return map() == GetHeap()->native_context_map();
 }
 
-
-bool Object::IsScriptContextTable() const {
-  if (!Object::IsHeapObject()) return false;
-  Map* map = HeapObject::cast(this)->map();
-  Heap* heap = map->GetHeap();
-  return map == heap->script_context_table_map();
+bool HeapObject::IsScriptContextTable() const {
+  return map() == GetHeap()->script_context_table_map();
 }
 
-
-bool Object::IsScopeInfo() const {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->scope_info_map();
+bool HeapObject::IsScopeInfo() const {
+  return map() == GetHeap()->scope_info_map();
 }
 
 
@@ -849,16 +781,18 @@
 TYPE_CHECKER(JSDate, JS_DATE_TYPE)
 TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
 
+bool HeapObject::IsAbstractCode() const {
+  return IsBytecodeArray() || IsCode();
+}
 
-bool Object::IsStringWrapper() const {
+bool HeapObject::IsStringWrapper() const {
   return IsJSValue() && JSValue::cast(this)->value()->IsString();
 }
 
 
 TYPE_CHECKER(Foreign, FOREIGN_TYPE)
 
-
-bool Object::IsBoolean() const {
+bool HeapObject::IsBoolean() const {
   return IsOddball() &&
       ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
 }
@@ -869,8 +803,7 @@
 TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
 TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
 
-
-bool Object::IsJSArrayBufferView() const {
+bool HeapObject::IsJSArrayBufferView() const {
   return IsJSDataView() || IsJSTypedArray();
 }
 
@@ -882,22 +815,14 @@
   return obj->IsJSArray();
 }
 
-
-bool Object::IsHashTable() const {
-  return Object::IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->hash_table_map();
+bool HeapObject::IsHashTable() const {
+  return map() == GetHeap()->hash_table_map();
 }
 
+bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
 
-bool Object::IsWeakHashTable() const {
-  return IsHashTable();
-}
-
-
-bool Object::IsDictionary() const {
-  return IsHashTable() &&
-      this != HeapObject::cast(this)->GetHeap()->string_table();
+bool HeapObject::IsDictionary() const {
+  return IsHashTable() && this != GetHeap()->string_table();
 }
 
 
@@ -918,13 +843,9 @@
   return IsDictionary();
 }
 
+bool HeapObject::IsStringTable() const { return IsHashTable(); }
 
-bool Object::IsStringTable() const {
-  return IsHashTable();
-}
-
-
-bool Object::IsNormalizedMapCache() const {
+bool HeapObject::IsNormalizedMapCache() const {
   return NormalizedMapCache::IsNormalizedMapCache(this);
 }
 
@@ -933,51 +854,34 @@
   return map->Hash() % NormalizedMapCache::kEntries;
 }
 
-
-bool NormalizedMapCache::IsNormalizedMapCache(const Object* obj) {
+bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
   if (!obj->IsFixedArray()) return false;
   if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
     return false;
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
-    reinterpret_cast<NormalizedMapCache*>(const_cast<Object*>(obj))->
-        NormalizedMapCacheVerify();
+    reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
+        ->NormalizedMapCacheVerify();
   }
 #endif
   return true;
 }
 
+bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
 
-bool Object::IsCompilationCacheTable() const {
+bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
+
+bool HeapObject::IsPolymorphicCodeCacheHashTable() const {
   return IsHashTable();
 }
 
+bool HeapObject::IsMapCache() const { return IsHashTable(); }
 
-bool Object::IsCodeCacheHashTable() const {
-  return IsHashTable();
-}
+bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
 
-
-bool Object::IsPolymorphicCodeCacheHashTable() const {
-  return IsHashTable();
-}
-
-
-bool Object::IsMapCache() const {
-  return IsHashTable();
-}
-
-
-bool Object::IsObjectHashTable() const {
-  return IsHashTable();
-}
-
-
-bool Object::IsOrderedHashTable() const {
-  return IsHeapObject() &&
-      HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
+bool HeapObject::IsOrderedHashTable() const {
+  return map() == GetHeap()->ordered_hash_table_map();
 }
 
 
@@ -995,40 +899,30 @@
   return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
 }
 
-
-bool Object::IsJSGlobalProxy() const {
-  bool result = IsHeapObject() &&
-                (HeapObject::cast(this)->map()->instance_type() ==
-                 JS_GLOBAL_PROXY_TYPE);
-  DCHECK(!result ||
-         HeapObject::cast(this)->map()->is_access_check_needed());
+bool HeapObject::IsJSGlobalProxy() const {
+  bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
+  DCHECK(!result || map()->is_access_check_needed());
   return result;
 }
 
 
 TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
 
-
-bool Object::IsUndetectableObject() const {
-  return IsHeapObject()
-    && HeapObject::cast(this)->map()->is_undetectable();
+bool HeapObject::IsUndetectableObject() const {
+  return map()->is_undetectable();
 }
 
-
-bool Object::IsAccessCheckNeeded() const {
-  if (!IsHeapObject()) return false;
+bool HeapObject::IsAccessCheckNeeded() const {
   if (IsJSGlobalProxy()) {
     const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
     JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
     return proxy->IsDetachedFrom(global);
   }
-  return HeapObject::cast(this)->map()->is_access_check_needed();
+  return map()->is_access_check_needed();
 }
 
-
-bool Object::IsStruct() const {
-  if (!IsHeapObject()) return false;
-  switch (HeapObject::cast(this)->map()->instance_type()) {
+bool HeapObject::IsStruct() const {
+  switch (map()->instance_type()) {
 #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
@@ -1036,56 +930,23 @@
   }
 }
 
-
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                         \
-  bool Object::Is##Name() const {                                       \
-    return Object::IsHeapObject()                                       \
-      && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                  \
+  bool Object::Is##Name() const {                                \
+    return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
+  }                                                              \
+  bool HeapObject::Is##Name() const {                            \
+    return map()->instance_type() == NAME##_TYPE;                \
   }
-  STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+STRUCT_LIST(MAKE_STRUCT_PREDICATE)
 #undef MAKE_STRUCT_PREDICATE
 
+#define MAKE_ODDBALL_PREDICATE(Name)                                       \
+  bool HeapObject::Is##Name() const {                                      \
+    return IsOddball() && Oddball::cast(this)->kind() == Oddball::k##Name; \
+  }
+ODDBALL_LIST(MAKE_ODDBALL_PREDICATE)
 
-bool Object::IsUndefined() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
-}
-
-
-bool Object::IsNull() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
-}
-
-
-bool Object::IsTheHole() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
-}
-
-
-bool Object::IsException() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
-}
-
-
-bool Object::IsUninitialized() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
-}
-
-
-bool Object::IsTrue() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
-}
-
-
-bool Object::IsFalse() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
-}
-
-
-bool Object::IsArgumentsMarker() const {
-  return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
-}
-
-
+#undef MAKE_ODDBALL_PREDICATE
 double Object::Number() const {
   DCHECK(IsNumber());
   return IsSmi()
@@ -1130,14 +991,14 @@
 
 
 bool Object::FitsRepresentation(Representation representation) {
-  if (FLAG_track_fields && representation.IsNone()) {
-    return false;
-  } else if (FLAG_track_fields && representation.IsSmi()) {
+  if (FLAG_track_fields && representation.IsSmi()) {
     return IsSmi();
   } else if (FLAG_track_double_fields && representation.IsDouble()) {
     return IsMutableHeapNumber() || IsNumber();
   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
     return IsHeapObject();
+  } else if (FLAG_track_fields && representation.IsNone()) {
+    return false;
   }
   return true;
 }
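
The reorder above just moves the impossible kNone case to the end so the
common representations are tested first. The shape of the check, as a
standalone sketch (flag handling dropped, field names invented):

    enum class Representation { kNone, kSmi, kDouble, kHeapObject, kTagged };

    struct Value {
      bool is_smi;
      bool is_number;
      bool is_heap_object;

      bool FitsRepresentation(Representation r) const {
        if (r == Representation::kSmi) return is_smi;            // common case
        if (r == Representation::kDouble) return is_number;
        if (r == Representation::kHeapObject) return is_heap_object;
        if (r == Representation::kNone) return false;            // nothing fits
        return true;                                             // kTagged
      }
    };
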
@@ -1146,8 +1007,8 @@
 // static
 MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
                                          Handle<Object> object) {
-  return ToObject(
-      isolate, object, handle(isolate->context()->native_context(), isolate));
+  if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+  return ToObject(isolate, object, isolate->native_context());
 }
 
 
@@ -1163,20 +1024,16 @@
   return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
 }
 
-
 MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
-                                        Handle<Name> name,
-                                        LanguageMode language_mode) {
+                                        Handle<Name> name) {
   LookupIterator it(object, name);
-  return GetProperty(&it, language_mode);
+  return GetProperty(&it);
 }
 
-
 MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
-                                       uint32_t index,
-                                       LanguageMode language_mode) {
+                                       uint32_t index) {
   LookupIterator it(isolate, object, index);
-  return GetProperty(&it, language_mode);
+  return GetProperty(&it);
 }
 
 
@@ -1189,25 +1046,23 @@
   return value;
 }
 
-
-MaybeHandle<Object> Object::GetPrototype(Isolate* isolate,
-                                         Handle<Object> receiver) {
+MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
+                                             Handle<JSReceiver> receiver) {
   // We don't expect access checks to be needed on JSProxy objects.
   DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
   PrototypeIterator iter(isolate, receiver,
-                         PrototypeIterator::START_AT_RECEIVER);
+                         PrototypeIterator::START_AT_RECEIVER,
+                         PrototypeIterator::END_AT_NON_HIDDEN);
   do {
     if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
-  } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+  } while (!iter.IsAtEnd());
   return PrototypeIterator::GetCurrent(iter);
 }
 
-
 MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
-                                        const char* name,
-                                        LanguageMode language_mode) {
+                                        const char* name) {
   Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
-  return GetProperty(object, str, language_mode);
+  return GetProperty(object, str);
 }
 
 
@@ -1241,12 +1096,10 @@
       reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
       reinterpret_cast<base::AtomicWord>(value));
 
-#define WRITE_BARRIER(heap, object, offset, value)                      \
-  heap->incremental_marking()->RecordWrite(                             \
-      object, HeapObject::RawField(object, offset), value);             \
-  if (heap->InNewSpace(value)) {                                        \
-    heap->RecordWrite(object->address(), offset);                       \
-  }
+#define WRITE_BARRIER(heap, object, offset, value)          \
+  heap->incremental_marking()->RecordWrite(                 \
+      object, HeapObject::RawField(object, offset), value); \
+  heap->RecordWrite(object, offset, value);
 
 #define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
   if (mode != SKIP_WRITE_BARRIER) {                                  \
@@ -1254,9 +1107,7 @@
       heap->incremental_marking()->RecordWrite(                      \
           object, HeapObject::RawField(object, offset), value);      \
     }                                                                \
-    if (heap->InNewSpace(value)) {                                   \
-      heap->RecordWrite(object->address(), offset);                  \
-    }                                                                \
+    heap->RecordWrite(object, offset, value);                        \
   }
 
 #define READ_DOUBLE_FIELD(p, offset) \
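
Both barrier macros shrink because the InNewSpace filter moved inside
Heap::RecordWrite. A toy model of the generational invariant they
maintain (MiniHeap and friends are invented, not V8's types):

    #include <unordered_set>

    struct MiniObject { bool young; };

    struct MiniHeap {
      // Slots in old objects that point at young objects; the minor GC
      // scans these as extra roots.
      std::unordered_set<MiniObject**> remembered_slots;

      // The filtering formerly done at every call site now lives here.
      void RecordWrite(MiniObject* host, MiniObject** slot, MiniObject* value) {
        if (!host->young && value != nullptr && value->young) {
          remembered_slots.insert(slot);
        }
      }

      void WriteField(MiniObject* host, MiniObject** slot, MiniObject* value) {
        *slot = value;                   // the raw store
        RecordWrite(host, slot, value);  // the barrier
      }
    };

Passing the value itself (rather than only the host address and offset)
is what lets RecordWrite do the filtering internally.
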
@@ -1390,8 +1241,9 @@
 
 
 Heap* HeapObject::GetHeap() const {
-  Heap* heap =
-      MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap();
+  Heap* heap = MemoryChunk::FromAddress(
+                   reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
+                   ->heap();
   SLOW_DCHECK(heap != NULL);
   return heap;
 }
@@ -1501,6 +1353,10 @@
 
 
 bool Simd128Value::Equals(Simd128Value* that) {
+  // TODO(bmeurer): This doesn't match the SIMD.js specification, but it seems
+  // to be consistent with what the CompareICStub does, and what is tested in
+  // the current SIMD.js testsuite.
+  if (this == that) return true;
 #define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
   if (this->Is##Type()) {                                      \
     if (!that->Is##Type()) return false;                       \
@@ -1858,6 +1714,9 @@
   return AllocationSite::cast(allocation_site());
 }
 
+Address AllocationMemento::GetAllocationSiteUnchecked() {
+  return reinterpret_cast<Address>(allocation_site());
+}
 
 void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
   JSObject::ValidateElements(object);
@@ -1953,7 +1812,8 @@
                                  Handle<FixedArrayBase> value) {
   JSObject::MigrateToMap(object, new_map);
   DCHECK((object->map()->has_fast_smi_or_object_elements() ||
-          (*value == object->GetHeap()->empty_fixed_array())) ==
+          (*value == object->GetHeap()->empty_fixed_array()) ||
+          object->map()->has_fast_string_wrapper_elements()) ==
          (value->map() == object->GetHeap()->fixed_array_map() ||
           value->map() == object->GetHeap()->fixed_cow_array_map()));
   DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
@@ -2040,9 +1900,7 @@
   // We just have to execute the generational barrier here because we never
   // mark through a weak cell and collect evacuation candidates when we process
   // all weak cells.
-  if (heap->InNewSpace(val)) {
-    heap->RecordWrite(address(), kValueOffset);
-  }
+  heap->RecordWrite(this, kValueOffset, val);
 }
 
 
@@ -2110,8 +1968,6 @@
       return JSSetIterator::kSize;
     case JS_MAP_ITERATOR_TYPE:
       return JSMapIterator::kSize;
-    case JS_ITERATOR_RESULT_TYPE:
-      return JSIteratorResult::kSize;
     case JS_WEAK_MAP_TYPE:
       return JSWeakMap::kSize;
     case JS_WEAK_SET_TYPE:
@@ -2236,15 +2092,10 @@
   }
 }
 
-
-void JSObject::WriteToField(int descriptor, Object* value) {
-  DisallowHeapAllocation no_gc;
-
-  DescriptorArray* desc = map()->instance_descriptors();
-  PropertyDetails details = desc->GetDetails(descriptor);
-
+void JSObject::WriteToField(int descriptor, PropertyDetails details,
+                            Object* value) {
   DCHECK(details.type() == DATA);
-
+  DisallowHeapAllocation no_gc;
   FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
   if (details.representation().IsDouble()) {
     // Nothing more to be done.
@@ -2261,6 +2112,11 @@
   }
 }
 
+void JSObject::WriteToField(int descriptor, Object* value) {
+  DescriptorArray* desc = map()->instance_descriptors();
+  PropertyDetails details = desc->GetDetails(descriptor);
+  WriteToField(descriptor, details, value);
+}
 
 int JSObject::GetInObjectPropertyOffset(int index) {
   return map()->GetInObjectPropertyOffset(index);
@@ -2335,19 +2191,6 @@
 }
 
 
-bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
-  if (!this->IsJSValue()) return false;
-
-  JSValue* js_value = JSValue::cast(this);
-  if (!js_value->value()->IsString()) return false;
-
-  String* str = String::cast(js_value->value());
-  if (index >= static_cast<uint32_t>(str->length())) return false;
-
-  return true;
-}
-
-
 void Object::VerifyApiCallResultType() {
 #if DEBUG
   if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
@@ -2364,9 +2207,8 @@
   return READ_FIELD(this, kHeaderSize + index * kPointerSize);
 }
 
-
-Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) {
-  return handle(array->get(index), array->GetIsolate());
+Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
+  return handle(array->get(index), isolate);
 }
 
 
@@ -2387,7 +2229,8 @@
 void FixedArray::set(int index, Object* value) {
   DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
   DCHECK(IsFixedArray());
-  DCHECK(index >= 0 && index < this->length());
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
   WRITE_BARRIER(GetHeap(), this, offset, value);
@@ -2411,13 +2254,12 @@
   return READ_UINT64_FIELD(this, offset);
 }
 
-
-Handle<Object> FixedDoubleArray::get(Handle<FixedDoubleArray> array,
-                                     int index) {
+Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
+                                     Isolate* isolate) {
   if (array->is_the_hole(index)) {
-    return array->GetIsolate()->factory()->the_hole_value();
+    return isolate->factory()->the_hole_value();
   } else {
-    return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
+    return isolate->factory()->NewNumber(array->get_scalar(index));
   }
 }
 
@@ -2706,15 +2548,14 @@
                               kEnumCacheOffset);
 }
 
-
-// Perform a binary search in a fixed array. Low and high are entry indices. If
-// there are three entries in this array it should be called with low=0 and
-// high=2.
+// Perform a binary search in a fixed array.
 template <SearchMode search_mode, typename T>
-int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
+int BinarySearch(T* array, Name* name, int valid_entries,
                  int* out_insertion_index) {
   DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
-  uint32_t hash = name->Hash();
+  int low = 0;
+  int high = array->number_of_entries() - 1;
+  uint32_t hash = name->hash_field();
   int limit = high;
 
   DCHECK(low <= high);
@@ -2722,7 +2563,7 @@
   while (low != high) {
     int mid = low + (high - low) / 2;
     Name* mid_name = array->GetSortedKey(mid);
-    uint32_t mid_hash = mid_name->Hash();
+    uint32_t mid_hash = mid_name->hash_field();
 
     if (mid_hash >= hash) {
       high = mid;
@@ -2734,14 +2575,14 @@
   for (; low <= limit; ++low) {
     int sort_index = array->GetSortedKeyIndex(low);
     Name* entry = array->GetKey(sort_index);
-    uint32_t current_hash = entry->Hash();
+    uint32_t current_hash = entry->hash_field();
     if (current_hash != hash) {
-      if (out_insertion_index != NULL) {
+      if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
         *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
       }
       return T::kNotFound;
     }
-    if (entry->Equals(name)) {
+    if (entry == name) {
       if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
         return sort_index;
       }
@@ -2749,7 +2590,9 @@
     }
   }
 
-  if (out_insertion_index != NULL) *out_insertion_index = limit + 1;
+  if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+    *out_insertion_index = limit + 1;
+  }
   return T::kNotFound;
 }
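
The rewritten BinarySearch keys on the raw hash field and finishes with a
pointer-identity scan, which is sound because every key is now guaranteed
unique (interned). The algorithm, as a self-contained sketch:

    #include <cstdint>
    #include <vector>

    struct Entry { std::uint32_t hash; const void* key; };  // sorted by hash

    int HashSearch(const std::vector<Entry>& sorted, std::uint32_t hash,
                   const void* key) {
      int low = 0;
      int high = static_cast<int>(sorted.size()) - 1;
      if (high < 0) return -1;
      int limit = high;
      // Narrow to the first entry whose hash is >= the probe hash.
      while (low != high) {
        int mid = low + (high - low) / 2;
        if (sorted[mid].hash >= hash) high = mid;
        else low = mid + 1;
      }
      // Several keys may share a hash: walk the run, match by identity.
      for (; low <= limit && sorted[low].hash == hash; ++low) {
        if (sorted[low].key == key) return low;
      }
      return -1;  // kNotFound
    }

Identity comparison replaces the dropped entry->Equals(name): two
interned names with equal contents are the same object, so == suffices.
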
 
@@ -2757,29 +2600,28 @@
 // Perform a linear search in this fixed array. len is the number of entry
 // indices that are valid.
 template <SearchMode search_mode, typename T>
-int LinearSearch(T* array, Name* name, int len, int valid_entries,
+int LinearSearch(T* array, Name* name, int valid_entries,
                  int* out_insertion_index) {
-  uint32_t hash = name->Hash();
-  if (search_mode == ALL_ENTRIES) {
+  if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+    uint32_t hash = name->hash_field();
+    int len = array->number_of_entries();
     for (int number = 0; number < len; number++) {
       int sorted_index = array->GetSortedKeyIndex(number);
       Name* entry = array->GetKey(sorted_index);
-      uint32_t current_hash = entry->Hash();
+      uint32_t current_hash = entry->hash_field();
       if (current_hash > hash) {
-        if (out_insertion_index != NULL) *out_insertion_index = sorted_index;
+        *out_insertion_index = sorted_index;
         return T::kNotFound;
       }
-      if (current_hash == hash && entry->Equals(name)) return sorted_index;
+      if (entry == name) return sorted_index;
     }
-    if (out_insertion_index != NULL) *out_insertion_index = len;
+    *out_insertion_index = len;
     return T::kNotFound;
   } else {
-    DCHECK(len >= valid_entries);
+    DCHECK_LE(valid_entries, array->number_of_entries());
     DCHECK_NULL(out_insertion_index);  // Not supported here.
     for (int number = 0; number < valid_entries; number++) {
-      Name* entry = array->GetKey(number);
-      uint32_t current_hash = entry->Hash();
-      if (current_hash == hash && entry->Equals(name)) return number;
+      if (array->GetKey(number) == name) return number;
     }
     return T::kNotFound;
   }
@@ -2788,44 +2630,39 @@
 
 template <SearchMode search_mode, typename T>
 int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
-  if (search_mode == VALID_ENTRIES) {
-    SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries));
-  } else {
-    SLOW_DCHECK(array->IsSortedNoDuplicates());
-  }
+  SLOW_DCHECK(array->IsSortedNoDuplicates());
 
-  int nof = array->number_of_entries();
-  if (nof == 0) {
-    if (out_insertion_index != NULL) *out_insertion_index = 0;
+  if (valid_entries == 0) {
+    if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+      *out_insertion_index = 0;
+    }
     return T::kNotFound;
   }
 
   // Fast case: do linear search for small arrays.
   const int kMaxElementsForLinearSearch = 8;
-  if ((search_mode == ALL_ENTRIES &&
-       nof <= kMaxElementsForLinearSearch) ||
-      (search_mode == VALID_ENTRIES &&
-       valid_entries <= (kMaxElementsForLinearSearch * 3))) {
-    return LinearSearch<search_mode>(array, name, nof, valid_entries,
+  if (valid_entries <= kMaxElementsForLinearSearch) {
+    return LinearSearch<search_mode>(array, name, valid_entries,
                                      out_insertion_index);
   }
 
   // Slow case: perform binary search.
-  return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries,
+  return BinarySearch<search_mode>(array, name, valid_entries,
                                    out_insertion_index);
 }
 
 
 int DescriptorArray::Search(Name* name, int valid_descriptors) {
+  DCHECK(name->IsUniqueName());
   return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
 }
 
-
-int DescriptorArray::SearchWithCache(Name* name, Map* map) {
+int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
+  DCHECK(name->IsUniqueName());
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   if (number_of_own_descriptors == 0) return kNotFound;
 
-  DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+  DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
   int number = cache->Lookup(map, name);
 
   if (number == DescriptorLookupCache::kAbsent) {
@@ -2836,7 +2673,6 @@
   return number;
 }
 
-
 PropertyDetails Map::GetLastDescriptorDetails() {
   return instance_descriptors()->GetDetails(LastAdded());
 }
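
SearchWithCache consults a per-isolate (map, name) -> index cache before
falling back to the search above. A direct-mapped cache in that spirit
(the size and hash mix are illustrative):

    #include <cstdint>

    class MiniLookupCache {
     public:
      static const int kAbsent = -2;

      MiniLookupCache() {
        for (int i = 0; i < kLength; i++) {
          slots_[i] = Slot{nullptr, nullptr, kAbsent};
        }
      }

      int Lookup(const void* map, const void* name) const {
        const Slot& s = slots_[Index(map, name)];
        return (s.map == map && s.name == name) ? s.result : kAbsent;
      }

      void Update(const void* map, const void* name, int result) {
        slots_[Index(map, name)] = Slot{map, name, result};
      }

     private:
      static const int kLength = 64;
      struct Slot { const void* map; const void* name; int result; };

      static int Index(const void* map, const void* name) {
        std::uintptr_t h = reinterpret_cast<std::uintptr_t>(map) ^
                           (reinterpret_cast<std::uintptr_t>(name) >> 2);
        return static_cast<int>(h & (kLength - 1));
      }

      Slot slots_[kLength];
    };

On kAbsent the caller runs the real search and calls Update, exactly the
lookup/Update pairing visible in the hunk.
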
@@ -2874,8 +2710,7 @@
 
 
 FixedArrayBase* Map::GetInitialElements() {
-  if (has_fast_smi_or_object_elements() ||
-      has_fast_double_elements()) {
+  if (has_fast_elements() || has_fast_string_wrapper_elements()) {
     DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
     return GetHeap()->empty_fixed_array();
   } else if (has_fixed_typed_array_elements()) {
@@ -2976,18 +2811,6 @@
   return GetDetails(descriptor_number).field_index();
 }
 
-
-HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
-  DCHECK(GetDetails(descriptor_number).location() == kField);
-  Object* value = GetValue(descriptor_number);
-  if (value->IsWeakCell()) {
-    if (WeakCell::cast(value)->cleared()) return HeapType::None();
-    value = WeakCell::cast(value)->value();
-  }
-  return HeapType::cast(value);
-}
-
-
 Object* DescriptorArray::GetConstant(int descriptor_number) {
   return GetValue(descriptor_number);
 }
@@ -3180,8 +3003,7 @@
 // ------------------------------------
 // Cast operations
 
-
-CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(AbstractCode)
 CAST_ACCESSOR(ArrayList)
 CAST_ACCESSOR(Bool16x8)
 CAST_ACCESSOR(Bool32x4)
@@ -3232,7 +3054,6 @@
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSSet)
 CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSIteratorResult)
 CAST_ACCESSOR(JSTypedArray)
 CAST_ACCESSOR(JSValue)
 CAST_ACCESSOR(JSWeakMap)
@@ -3418,6 +3239,28 @@
   return length() - kFirstLiteralIndex;
 }
 
+int HandlerTable::GetRangeStart(int index) const {
+  return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
+}
+
+int HandlerTable::GetRangeEnd(int index) const {
+  return Smi::cast(get(index * kRangeEntrySize + kRangeEndIndex))->value();
+}
+
+int HandlerTable::GetRangeHandler(int index) const {
+  return HandlerOffsetField::decode(
+      Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
+}
+
+int HandlerTable::GetRangeData(int index) const {
+  return Smi::cast(get(index * kRangeEntrySize + kRangeDataIndex))->value();
+}
+
+HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
+    int index) const {
+  return HandlerPredictionField::decode(
+      Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
+}
 
 void HandlerTable::SetRangeStart(int index, int value) {
   set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
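
The new getters read a flat array in which each range entry spans
kRangeEntrySize consecutive slots, and the handler slot packs the catch
prediction next to the handler offset. A sketch of that encoding (the
bit layout here is invented; only the technique matches):

    #include <vector>

    enum CatchPrediction { UNCAUGHT = 0, CAUGHT = 1 };

    const int kRangeEntrySize = 4;
    const int kRangeHandlerIndex = 2;
    const int kPredictionBits = 1;

    int EncodeHandler(int offset, CatchPrediction p) {
      return (offset << kPredictionBits) | static_cast<int>(p);
    }

    int GetRangeHandler(const std::vector<int>& table, int index) {
      int packed = table[index * kRangeEntrySize + kRangeHandlerIndex];
      return packed >> kPredictionBits;  // HandlerOffsetField::decode analogue
    }

    CatchPrediction GetRangePrediction(const std::vector<int>& table, int index) {
      int packed = table[index * kRangeEntrySize + kRangeHandlerIndex];
      return static_cast<CatchPrediction>(packed & 1);  // the prediction bit
    }

    int NumberOfRangeEntries(const std::vector<int>& table) {
      return static_cast<int>(table.size()) / kRangeEntrySize;
    }
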
@@ -3436,9 +3279,8 @@
   set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
 }
 
-
-void HandlerTable::SetRangeDepth(int index, int value) {
-  set(index * kRangeEntrySize + kRangeDepthIndex, Smi::FromInt(value));
+void HandlerTable::SetRangeData(int index, int value) {
+  set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
 }
 
 
@@ -3454,6 +3296,9 @@
   set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
 }
 
+int HandlerTable::NumberOfRangeEntries() const {
+  return length() / kRangeEntrySize;
+}
 
 #define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
   STRUCT_LIST(MAKE_STRUCT_CAST)
@@ -3584,12 +3429,6 @@
 }
 
 
-Handle<Name> Name::Flatten(Handle<Name> name, PretenureFlag pretenure) {
-  if (name->IsSymbol()) return name;
-  return String::Flatten(Handle<String>::cast(name));
-}
-
-
 uint16_t String::Get(int index) {
   DCHECK(index >= 0 && index < length());
   switch (StringShape(this).full_representation_tag()) {
@@ -4045,6 +3884,14 @@
                   (number_of_parameters << kPointerSizeLog2));
 }
 
+int BytecodeArray::interrupt_budget() const {
+  return READ_INT_FIELD(this, kInterruptBudgetOffset);
+}
+
+void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
+  DCHECK_GE(interrupt_budget, 0);
+  WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
+}
 
 int BytecodeArray::parameter_count() const {
   // Parameter count is stored as the size on stack of the parameters to allow
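
interrupt_budget above is a raw int field read and written at a fixed
byte offset inside the object, which is all READ_INT_FIELD and
WRITE_INT_FIELD do. A portable stand-in (memcpy instead of the casts the
macros use; the offset constant is made up):

    #include <cstddef>
    #include <cstring>

    const std::size_t kInterruptBudgetOffset = 8;  // illustrative only

    int ReadIntField(const void* object, std::size_t offset) {
      int value;
      std::memcpy(&value, static_cast<const char*>(object) + offset,
                  sizeof(value));
      return value;
    }

    void WriteIntField(void* object, std::size_t offset, int value) {
      std::memcpy(static_cast<char*>(object) + offset, &value, sizeof(value));
    }
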
@@ -4054,6 +3901,9 @@
 
 
 ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
+ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
+ACCESSORS(BytecodeArray, source_position_table, FixedArray,
+          kSourcePositionTableOffset)
 
 
 Address BytecodeArray::GetFirstBytecodeAddress() {
@@ -4219,11 +4069,9 @@
   return value;
 }
 
-
 template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(
-    Handle<FixedTypedArray<Traits> > array,
-    int index) {
+Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
+                                            int index) {
   return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
 }
 
@@ -4470,8 +4318,12 @@
 }
 
 
-void Map::set_is_constructor() {
-  set_bit_field(bit_field() | (1 << kIsConstructor));
+void Map::set_is_constructor(bool value) {
+  if (value) {
+    set_bit_field(bit_field() | (1 << kIsConstructor));
+  } else {
+    set_bit_field(bit_field() & ~(1 << kIsConstructor));
+  }
 }
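
set_is_constructor now takes a bool so callers can clear the flag as well
as set it, the usual OR-to-set / AND-NOT-to-clear pair. In isolation
(the bit position is invented):

    const int kIsConstructorBit = 4;

    struct MiniMap {
      unsigned bit_field = 0;

      void set_is_constructor(bool value) {
        if (value) {
          bit_field |= (1u << kIsConstructorBit);   // set the bit
        } else {
          bit_field &= ~(1u << kIsConstructorBit);  // clear the bit
        }
      }

      bool is_constructor() const {
        return (bit_field & (1u << kIsConstructorBit)) != 0;
      }
    };
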
 
 
@@ -4479,14 +4331,12 @@
   return ((1 << kIsConstructor) & bit_field()) != 0;
 }
 
-
-void Map::set_is_hidden_prototype() {
-  set_bit_field3(IsHiddenPrototype::update(bit_field3(), true));
+void Map::set_has_hidden_prototype(bool value) {
+  set_bit_field3(HasHiddenPrototype::update(bit_field3(), value));
 }
 
-
-bool Map::is_hidden_prototype() const {
-  return IsHiddenPrototype::decode(bit_field3());
+bool Map::has_hidden_prototype() const {
+  return HasHiddenPrototype::decode(bit_field3());
 }
 
 
@@ -4596,6 +4446,10 @@
   return IsSloppyArgumentsElements(elements_kind());
 }
 
+bool Map::has_fast_string_wrapper_elements() {
+  return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
 bool Map::has_fixed_typed_array_elements() {
   return IsFixedTypedArrayElementsKind(elements_kind());
 }
@@ -4898,6 +4752,13 @@
   return interpreter_entry.location() != nullptr && *interpreter_entry == this;
 }
 
+inline bool Code::is_interpreter_enter_bytecode_dispatch() {
+  Handle<Code> interpreter_handler =
+      GetIsolate()->builtins()->InterpreterEnterBytecodeDispatch();
+  return interpreter_handler.location() != nullptr &&
+         *interpreter_handler == this;
+}
+
 inline void Code::set_is_crankshafted(bool value) {
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = IsCrankshaftedField::update(previous, value);
@@ -5274,6 +5135,19 @@
   friend class Code;
 };
 
+int AbstractCode::Size() {
+  if (IsCode()) {
+    return GetCode()->instruction_size();
+  } else {
+    return GetBytecodeArray()->length();
+  }
+}
+
+Code* AbstractCode::GetCode() { return Code::cast(this); }
+
+BytecodeArray* AbstractCode::GetBytecodeArray() {
+  return BytecodeArray::cast(this);
+}
 
 Object* Map::prototype() const {
   return READ_FIELD(this, kPrototypeOffset);
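
AbstractCode lets debug code treat optimized machine code and interpreter
bytecode uniformly: one value that is either a Code or a BytecodeArray,
with accessors dispatching on which it is. Schematically (stand-in
types, not V8's):

    struct MachineCode { int instruction_size; };
    struct Bytecodes { int length; };

    struct AbstractCodeRef {
      bool is_code;
      union {
        MachineCode* code;
        Bytecodes* bytecodes;
      };

      // Mirrors AbstractCode::Size(): instruction size for compiled
      // code, array length for bytecode.
      int Size() const {
        return is_code ? code->instruction_size : bytecodes->length;
      }
    };

V8 gets the same effect without a tag word because the map already
distinguishes the two, so GetCode and GetBytecodeArray are bare casts.
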
@@ -5458,7 +5332,6 @@
           kBoundTargetFunctionOffset)
 ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
 ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
-ACCESSORS(JSBoundFunction, creation_context, Context, kCreationContextOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
 ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
@@ -5475,9 +5348,9 @@
 ACCESSORS(AccessorInfo, expected_receiver_type, Object,
           kExpectedReceiverTypeOffset)
 
-ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 
 ACCESSORS(Box, value, Object, kValueOffset)
 
@@ -5515,11 +5388,11 @@
 ACCESSORS(CallHandlerInfo, fast_handler, Object, kFastHandlerOffset)
 
 ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
+ACCESSORS(TemplateInfo, serial_number, Object, kSerialNumberOffset)
 SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
 ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
 ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
 
-ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
 ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
 ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
           kPrototypeTemplateOffset)
@@ -5601,10 +5474,14 @@
 
 
 ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, code, Code, kCodeIndex)
+ACCESSORS(DebugInfo, abstract_code, AbstractCode, kAbstractCodeIndex)
 ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
 
-SMI_ACCESSORS(BreakPointInfo, code_position, kCodePositionIndex)
+BytecodeArray* DebugInfo::original_bytecode_array() {
+  return shared()->bytecode_array();
+}
+
+SMI_ACCESSORS(BreakPointInfo, code_offset, kCodeOffsetIndex)
 SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionIndex)
 SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
@@ -5641,8 +5518,8 @@
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
                kAcceptAnyReceiver)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
-               kIsExpressionBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
+               kIsNamedExpressionBit)
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
                kIsTopLevelBit)
 
@@ -5664,7 +5541,8 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
                kNeverCompiled)
-
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+               kIsDeclaration)
 
 #if V8_HOST_ARCH_32_BIT
 SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
@@ -5804,7 +5682,8 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                name_should_print_as_anonymous,
                kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous_expression,
+               kIsAnonymousExpression)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
                kDontCrankshaft)
@@ -5813,8 +5692,10 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
                kIsConciseMethod)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_accessor_function,
-               kIsAccessorFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
+               kIsGetterFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_setter_function,
+               kIsSetterFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
                kIsDefaultConstructor)
 
@@ -5928,7 +5809,8 @@
 
 
 bool SharedFunctionInfo::HasDebugCode() {
-  return code()->kind() == Code::FUNCTION && code()->has_debug_break_slots();
+  return HasBytecodeArray() ||
+         (code()->kind() == Code::FUNCTION && code()->has_debug_break_slots());
 }
 
 
@@ -6298,6 +6180,7 @@
 ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
 ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
 ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
+ACCESSORS(JSGeneratorObject, input, Object, kInputOffset)
 SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
 ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
 
@@ -6720,22 +6603,30 @@
   return IsSloppyArgumentsElements(GetElementsKind());
 }
 
+bool JSObject::HasStringWrapperElements() {
+  return IsStringWrapperElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasFastStringWrapperElements() {
+  return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
+bool JSObject::HasSlowStringWrapperElements() {
+  return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
+}
 
 bool JSObject::HasFixedTypedArrayElements() {
-  HeapObject* array = elements();
-  DCHECK(array != NULL);
-  return array->IsFixedTypedArrayBase();
+  DCHECK_NOT_NULL(elements());
+  return map()->has_fixed_typed_array_elements();
 }
 
-
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size)         \
-bool JSObject::HasFixed##Type##Elements() {                               \
-  HeapObject* array = elements();                                         \
-  DCHECK(array != NULL);                                                  \
-  if (!array->IsHeapObject())                                             \
-    return false;                                                         \
-  return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE;      \
-}
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size)      \
+  bool JSObject::HasFixed##Type##Elements() {                          \
+    HeapObject* array = elements();                                    \
+    DCHECK(array != NULL);                                             \
+    if (!array->IsHeapObject()) return false;                          \
+    return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+  }
 
 TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
 
@@ -6760,7 +6651,7 @@
 
 
 SeededNumberDictionary* JSObject::element_dictionary() {
-  DCHECK(HasDictionaryElements());
+  DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
   return SeededNumberDictionary::cast(elements());
 }
 
@@ -6975,9 +6866,8 @@
 
 
 // static
-Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y,
-                                Strength strength) {
-  Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y) {
+  Maybe<ComparisonResult> result = Compare(x, y);
   if (result.IsJust()) {
     switch (result.FromJust()) {
       case ComparisonResult::kGreaterThan:
@@ -6993,9 +6883,8 @@
 
 
 // static
-Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y,
-                                       Strength strength) {
-  Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y) {
+  Maybe<ComparisonResult> result = Compare(x, y);
   if (result.IsJust()) {
     switch (result.FromJust()) {
       case ComparisonResult::kEqual:
@@ -7011,9 +6900,8 @@
 
 
 // static
-Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y,
-                             Strength strength) {
-  Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y) {
+  Maybe<ComparisonResult> result = Compare(x, y);
   if (result.IsJust()) {
     switch (result.FromJust()) {
       case ComparisonResult::kLessThan:
@@ -7029,9 +6917,8 @@
 
 
 // static
-Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y,
-                                    Strength strength) {
-  Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y) {
+  Maybe<ComparisonResult> result = Compare(x, y);
   if (result.IsJust()) {
     switch (result.FromJust()) {
       case ComparisonResult::kEqual:
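
All four relational operators above share one shape: run the three-way
Compare (which can fail if user code throws), then map its result onto a
bool. A compact model, with MaybeBool standing in for Maybe<bool>:

    enum class ComparisonResult { kLessThan, kEqual, kGreaterThan, kUndefined };

    struct MaybeBool {
      bool is_just;  // false models Nothing<bool>(), i.e. a pending exception
      bool value;
    };

    MaybeBool LessThanOrEqual(bool compare_ok, ComparisonResult result) {
      if (!compare_ok) return {false, false};  // propagate the failure
      switch (result) {
        case ComparisonResult::kLessThan:
        case ComparisonResult::kEqual:
          return {true, true};
        case ComparisonResult::kGreaterThan:
        case ComparisonResult::kUndefined:  // NaN operands compare undefined
          return {true, false};
      }
      return {false, false};
    }

Dropping the Strength parameter removes strong-mode comparison; the
result mapping is otherwise unchanged.
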
@@ -7045,23 +6932,19 @@
   return Nothing<bool>();
 }
 
-
 MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
-                                                 Handle<Name> name,
-                                                 LanguageMode language_mode) {
+                                                 Handle<Name> name) {
   LookupIterator it =
       LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
-  return GetProperty(&it, language_mode);
+  return GetProperty(&it);
 }
 
-
-MaybeHandle<Object> Object::GetPropertyOrElement(Handle<JSReceiver> holder,
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
                                                  Handle<Name> name,
-                                                 Handle<Object> receiver,
-                                                 LanguageMode language_mode) {
+                                                 Handle<JSReceiver> holder) {
   LookupIterator it = LookupIterator::PropertyOrElement(
       name->GetIsolate(), receiver, name, holder);
-  return GetProperty(&it, language_mode);
+  return GetProperty(&it);
 }
 
 
@@ -7706,10 +7589,6 @@
 }
 
 
-ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
-
-
 String::SubStringRange::SubStringRange(String* string, int first, int length)
     : string_(string),
       first_(first),
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index db71650..67bc62e 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -33,7 +33,7 @@
 
 
 void HeapObject::PrintHeader(std::ostream& os, const char* id) {  // NOLINT
-  os << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
+  os << reinterpret_cast<void*>(this) << ": [" << id << "]";
 }
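
PrintHeader no longer emits a trailing newline; every field printer below
now leads with "\n - " instead. Optional fields can then be skipped
without leaving blank lines, and each printer owns exactly one line
break. A tiny illustration:

    #include <iostream>

    void PrintHeader(std::ostream& os, const void* self, const char* id) {
      os << self << ": [" << id << "]";  // note: no '\n'
    }

    void PrintMiniMap(std::ostream& os, bool callable) {
      PrintHeader(os, nullptr, "Map");
      os << "\n - type: " << 7;             // starts its own line
      if (callable) os << "\n - callable";  // absent flag prints nothing
      os << "\n";                           // single terminator at the end
    }
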
 
 
@@ -146,9 +146,6 @@
     case JS_MAP_ITERATOR_TYPE:
       JSMapIterator::cast(this)->JSMapIteratorPrint(os);
       break;
-    case JS_ITERATOR_RESULT_TYPE:
-      JSIteratorResult::cast(this)->JSIteratorResultPrint(os);
-      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapPrint(os);
       break;
@@ -326,7 +323,8 @@
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
-    case FAST_ELEMENTS: {
+    case FAST_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
@@ -371,6 +369,8 @@
 #undef PRINT_ELEMENTS
 
     case DICTIONARY_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
+      os << "\n - elements: ";
       elements()->Print(os);
       break;
     case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -384,6 +384,8 @@
          << "\n   arguments: " << Brief(p->get(1));
       break;
     }
+    case NO_ELEMENTS:
+      break;
   }
 }
 
@@ -394,7 +396,7 @@
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
   PrototypeIterator iter(obj->GetIsolate(), obj);
-  os << " - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+  os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
      << ElementsKindToString(obj->map()->elements_kind())
      << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
 }
@@ -433,7 +435,7 @@
 
 void Symbol::SymbolPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Symbol");
-  os << " - hash: " << Hash();
+  os << "\n - hash: " << Hash();
   os << "\n - name: " << Brief(name());
   if (name()->IsUndefined()) {
     os << " (" << PrivateSymbolToName() << ")";
@@ -445,31 +447,31 @@
 
 void Map::MapPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Map");
-  os << " - type: " << instance_type() << "\n";
-  os << " - instance size: " << instance_size() << "\n";
+  os << "\n - type: " << instance_type();
+  os << "\n - instance size: " << instance_size();
   if (IsJSObjectMap()) {
-    os << " - inobject properties: " << GetInObjectProperties() << "\n";
+    os << "\n - inobject properties: " << GetInObjectProperties();
   }
-  os << " - elements kind: " << ElementsKindToString(elements_kind()) << "\n";
-  os << " - unused property fields: " << unused_property_fields() << "\n";
-  if (is_deprecated()) os << " - deprecated_map\n";
-  if (is_stable()) os << " - stable_map\n";
-  if (is_dictionary_map()) os << " - dictionary_map\n";
-  if (is_hidden_prototype()) os << " - hidden_prototype\n";
-  if (has_named_interceptor()) os << " - named_interceptor\n";
-  if (has_indexed_interceptor()) os << " - indexed_interceptor\n";
-  if (is_undetectable()) os << " - undetectable\n";
-  if (is_callable()) os << " - callable\n";
-  if (is_constructor()) os << " - constructor\n";
-  if (is_access_check_needed()) os << " - access_check_needed\n";
-  if (!is_extensible()) os << " - non-extensible\n";
-  if (is_observed()) os << " - observed\n";
-  if (is_strong()) os << " - strong_map\n";
+  os << "\n - elements kind: " << ElementsKindToString(elements_kind());
+  os << "\n - unused property fields: " << unused_property_fields();
+  if (is_deprecated()) os << "\n - deprecated_map";
+  if (is_stable()) os << "\n - stable_map";
+  if (is_dictionary_map()) os << "\n - dictionary_map";
+  if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
+  if (has_named_interceptor()) os << "\n - named_interceptor";
+  if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
+  if (is_undetectable()) os << "\n - undetectable";
+  if (is_callable()) os << "\n - callable";
+  if (is_constructor()) os << "\n - constructor";
+  if (is_access_check_needed()) os << "\n - access_check_needed";
+  if (!is_extensible()) os << "\n - non-extensible";
+  if (is_observed()) os << "\n - observed";
+  if (is_strong()) os << "\n - strong_map";
   if (is_prototype_map()) {
-    os << " - prototype_map\n";
-    os << " - prototype info: " << Brief(prototype_info());
+    os << "\n - prototype_map";
+    os << "\n - prototype info: " << Brief(prototype_info());
   } else {
-    os << " - back pointer: " << Brief(GetBackPointer());
+    os << "\n - back pointer: " << Brief(GetBackPointer());
   }
   os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
      << "#" << NumberOfOwnDescriptors() << ": "
@@ -508,7 +510,7 @@
 
 void TypeFeedbackInfo::TypeFeedbackInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "TypeFeedbackInfo");
-  os << " - ic_total_count: " << ic_total_count()
+  os << "\n - ic_total_count: " << ic_total_count()
      << ", ic_with_type_info_count: " << ic_with_type_info_count()
      << ", ic_generic_count: " << ic_generic_count() << "\n";
 }
@@ -523,7 +525,7 @@
 
 void FixedArray::FixedArrayPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "FixedArray");
-  os << " - length: " << length();
+  os << "\n - length: " << length();
   for (int i = 0; i < length(); i++) {
     os << "\n  [" << i << "]: " << Brief(get(i));
   }
@@ -533,7 +535,7 @@
 
 void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "FixedDoubleArray");
-  os << " - length: " << length();
+  os << "\n - length: " << length();
   for (int i = 0; i < length(); i++) {
     os << "\n  [" << i << "]: ";
     if (is_the_hole(i)) {
@@ -548,7 +550,7 @@
 
 void TransitionArray::TransitionArrayPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "TransitionArray");
-  os << " - capacity: " << length();
+  os << "\n - capacity: " << length();
   for (int i = 0; i < length(); i++) {
     os << "\n  [" << i << "]: " << Brief(get(i));
     if (i == kNextLinkIndex) os << " (next link)";
@@ -569,7 +571,7 @@
 void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
     std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "TypeFeedbackMetadata");
-  os << " - length: " << length();
+  os << "\n - length: " << length();
   if (length() == 0) {
     os << " (empty)\n";
     return;
@@ -594,7 +596,7 @@
 
 void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "TypeFeedbackVector");
-  os << " - length: " << length();
+  os << "\n - length: " << length();
   if (length() == 0) {
     os << " (empty)\n";
     return;
@@ -737,7 +739,7 @@
 
 void JSProxy::JSProxyPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "JSProxy");
-  os << " - map = " << reinterpret_cast<void*>(map());
+  os << "\n - map = " << reinterpret_cast<void*>(map());
   os << "\n - target = ";
   target()->ShortPrint(os);
   os << "\n - handler = ";
@@ -795,14 +797,6 @@
 }
 
 
-void JSIteratorResult::JSIteratorResultPrint(std::ostream& os) {  // NOLINT
-  JSObjectPrintHeader(os, this, "JSIteratorResult");
-  os << "\n - done = " << Brief(done());
-  os << "\n - value = " << Brief(value());
-  os << "\n";
-}
-
-
 void JSWeakMap::JSWeakMapPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSWeakMap");
   os << "\n - table = " << Brief(table());
@@ -874,7 +868,7 @@
 
 void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "SharedFunctionInfo");
-  os << " - name: " << Brief(name());
+  os << "\n - name: " << Brief(name());
   os << "\n - expected_nof_properties: " << expected_nof_properties();
   os << "\n - ast_node_count: " << ast_node_count();
   os << "\n - instance class name = ";
@@ -892,10 +886,16 @@
   // Script files are often large, hard to read.
   // os << "\n - script =";
   // script()->Print(os);
+  if (is_named_expression()) {
+    os << "\n - named expression";
+  } else if (is_anonymous_expression()) {
+    os << "\n - anonymous expression";
+  } else if (is_declaration()) {
+    os << "\n - declaration";
+  }
   os << "\n - function token position = " << function_token_position();
   os << "\n - start position = " << start_position();
   os << "\n - end position = " << end_position();
-  os << "\n - is expression = " << is_expression();
   os << "\n - debug info = " << Brief(debug_info());
   os << "\n - length = " << length();
   os << "\n - optimized_code_map = " << Brief(optimized_code_map());
@@ -926,14 +926,14 @@
 
 void Cell::CellPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Cell");
-  os << " - value: " << Brief(value());
+  os << "\n - value: " << Brief(value());
   os << "\n";
 }
 
 
 void PropertyCell::PropertyCellPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "PropertyCell");
-  os << " - value: " << Brief(value());
+  os << "\n - value: " << Brief(value());
   os << "\n - details: " << property_details();
   os << "\n";
 }
@@ -952,6 +952,7 @@
 
 void Code::CodePrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Code");
+  os << "\n";
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_use_verbose_printer) {
     Disassemble(NULL, os);
@@ -966,9 +967,8 @@
 }
 
 
-void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(
-    std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "ExecutableAccessorInfo");
+void AccessorInfo::AccessorInfoPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AccessorInfo");
   os << "\n - name: " << Brief(name());
   os << "\n - flag: " << flag();
   os << "\n - getter: " << Brief(getter());
@@ -1046,8 +1046,8 @@
   HeapObject::PrintHeader(os, "FunctionTemplateInfo");
   os << "\n - class name: " << Brief(class_name());
   os << "\n - tag: " << Brief(tag());
-  os << "\n - property_list: " << Brief(property_list());
   os << "\n - serial_number: " << Brief(serial_number());
+  os << "\n - property_list: " << Brief(property_list());
   os << "\n - call_code: " << Brief(call_code());
   os << "\n - property_accessors: " << Brief(property_accessors());
   os << "\n - prototype_template: " << Brief(prototype_template());
@@ -1067,7 +1067,8 @@
 
 void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "ObjectTemplateInfo");
-  os << " - tag: " << Brief(tag());
+  os << "\n - tag: " << Brief(tag());
+  os << "\n - serial_number: " << Brief(serial_number());
   os << "\n - property_list: " << Brief(property_list());
   os << "\n - property_accessors: " << Brief(property_accessors());
   os << "\n - constructor: " << Brief(constructor());
@@ -1078,7 +1079,7 @@
 
 void AllocationSite::AllocationSitePrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "AllocationSite");
-  os << " - weak_next: " << Brief(weak_next());
+  os << "\n - weak_next: " << Brief(weak_next());
   os << "\n - dependent code: " << Brief(dependent_code());
   os << "\n - nested site: " << Brief(nested_site());
   os << "\n - memento found count: "
@@ -1102,7 +1103,7 @@
 
 void AllocationMemento::AllocationMementoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "AllocationMemento");
-  os << " - allocation site: ";
+  os << "\n - allocation site: ";
   if (IsValid()) {
     GetAllocationSite()->Print(os);
   } else {
@@ -1134,7 +1135,7 @@
 void DebugInfo::DebugInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "DebugInfo");
   os << "\n - shared: " << Brief(shared());
-  os << "\n - code: " << Brief(code());
+  os << "\n - code: " << Brief(abstract_code());
   os << "\n - break_points: ";
   break_points()->Print(os);
 }
@@ -1142,7 +1143,7 @@
 
 void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "BreakPointInfo");
-  os << "\n - code_position: " << code_position();
+  os << "\n - code_offset: " << code_offset();
   os << "\n - source_position: " << source_position();
   os << "\n - statement_position: " << statement_position();
   os << "\n - break_point_objects: " << Brief(break_point_objects());
diff --git a/src/objects.cc b/src/objects.cc
index d9d8213..f577d5e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -11,6 +11,7 @@
 #include "src/accessors.h"
 #include "src/allocation-site-scopes.h"
 #include "src/api.h"
+#include "src/api-natives.h"
 #include "src/arguments.h"
 #include "src/base/bits.h"
 #include "src/base/utils/random-number-generator.h"
@@ -24,12 +25,14 @@
 #include "src/deoptimizer.h"
 #include "src/elements.h"
 #include "src/execution.h"
-#include "src/field-index.h"
 #include "src/field-index-inl.h"
+#include "src/field-index.h"
+#include "src/field-type.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 #include "src/identity-map.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/interpreter/source-position-table.h"
 #include "src/isolate-inl.h"
 #include "src/key-accumulator.h"
 #include "src/list.h"
@@ -70,20 +73,19 @@
   return os << "UNKNOWN";  // Keep the compiler happy.
 }
 
-
-Handle<HeapType> Object::OptimalType(Isolate* isolate,
-                                     Representation representation) {
-  if (representation.IsNone()) return HeapType::None(isolate);
+Handle<FieldType> Object::OptimalType(Isolate* isolate,
+                                      Representation representation) {
+  if (representation.IsNone()) return FieldType::None(isolate);
   if (FLAG_track_field_types) {
     if (representation.IsHeapObject() && IsHeapObject()) {
       // We can track only JavaScript objects with stable maps.
       Handle<Map> map(HeapObject::cast(this)->map(), isolate);
       if (map->is_stable() && map->IsJSReceiverMap()) {
-        return HeapType::Class(map, isolate);
+        return FieldType::Class(map, isolate);
       }
     }
   }
-  return HeapType::Any(isolate);
+  return FieldType::Any(isolate);
 }
 
 
@@ -98,7 +100,9 @@
     int constructor_function_index =
         Handle<HeapObject>::cast(object)->map()->GetConstructorFunctionIndex();
     if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
-      return MaybeHandle<JSReceiver>();
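+      // Only undefined and null have no wrapper constructor; per ES6 ToObject
+      // they now throw a TypeError instead of returning an empty handle.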
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+                      JSReceiver);
     }
     constructor = handle(
         JSFunction::cast(native_context->get(constructor_function_index)),
@@ -259,14 +263,11 @@
 
 
 // static
-Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y,
-                                        Strength strength) {
-  if (!is_strong(strength)) {
-    // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
-    if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
-        !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
-      return Nothing<ComparisonResult>();
-    }
+Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
+  // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
+  if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
+      !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
+    return Nothing<ComparisonResult>();
   }
   if (x->IsString() && y->IsString()) {
     // ES6 section 7.2.11 Abstract Relational Comparison step 5.
@@ -274,23 +275,8 @@
         String::Compare(Handle<String>::cast(x), Handle<String>::cast(y)));
   }
   // ES6 section 7.2.11 Abstract Relational Comparison step 6.
-  if (!is_strong(strength)) {
-    if (!Object::ToNumber(x).ToHandle(&x) ||
-        !Object::ToNumber(y).ToHandle(&y)) {
-      return Nothing<ComparisonResult>();
-    }
-  } else {
-    if (!x->IsNumber()) {
-      Isolate* const isolate = Handle<HeapObject>::cast(x)->GetIsolate();
-      isolate->Throw(*isolate->factory()->NewTypeError(
-          MessageTemplate::kStrongImplicitConversion));
-      return Nothing<ComparisonResult>();
-    } else if (!y->IsNumber()) {
-      Isolate* const isolate = Handle<HeapObject>::cast(y)->GetIsolate();
-      isolate->Throw(*isolate->factory()->NewTypeError(
-          MessageTemplate::kStrongImplicitConversion));
-      return Nothing<ComparisonResult>();
-    }
+  if (!Object::ToNumber(x).ToHandle(&x) || !Object::ToNumber(y).ToHandle(&y)) {
+    return Nothing<ComparisonResult>();
   }
   return Just(NumberCompare(x->Number(), y->Number()));
 }
@@ -410,10 +396,10 @@
 // static
 Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
   if (object->IsNumber()) return isolate->factory()->number_string();
-  if (object->IsUndefined() || object->IsUndetectableObject()) {
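+  // Oddballs (undefined, null, true, false, ...) store their typeof result
+  // directly on the object, so one check covers all of them.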
+  if (object->IsOddball()) return handle(Oddball::cast(*object)->type_of());
+  if (object->IsUndetectableObject()) {
     return isolate->factory()->undefined_string();
   }
-  if (object->IsBoolean()) return isolate->factory()->boolean_string();
   if (object->IsString()) return isolate->factory()->string_string();
   if (object->IsSymbol()) return isolate->factory()->symbol_string();
   if (object->IsString()) return isolate->factory()->string_string();
@@ -428,13 +414,8 @@
 
 // static
 MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
-                                     Handle<Object> rhs, Strength strength) {
+                                     Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -444,13 +425,8 @@
 
 // static
 MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
-                                   Handle<Object> rhs, Strength strength) {
+                                   Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -460,13 +436,8 @@
 
 // static
 MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
-                                    Handle<Object> rhs, Strength strength) {
+                                    Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -476,16 +447,12 @@
 
 // static
 MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
-                                Handle<Object> rhs, Strength strength) {
+                                Handle<Object> rhs) {
   if (lhs->IsNumber() && rhs->IsNumber()) {
     return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
   } else if (lhs->IsString() && rhs->IsString()) {
     return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
                                              Handle<String>::cast(rhs));
-  } else if (is_strong(strength)) {
-    THROW_NEW_ERROR(isolate,
-                    NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                    Object);
   }
   ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(lhs), Object);
   ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(rhs), Object);
@@ -505,13 +472,8 @@
 
 // static
 MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
-                                     Handle<Object> rhs, Strength strength) {
+                                     Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -521,13 +483,8 @@
 
 // static
 MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
-                                      Handle<Object> rhs, Strength strength) {
+                                      Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -538,13 +495,8 @@
 
 // static
 MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
-                                       Handle<Object> rhs, Strength strength) {
+                                       Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -556,14 +508,8 @@
 // static
 MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
                                               Handle<Object> lhs,
-                                              Handle<Object> rhs,
-                                              Strength strength) {
+                                              Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -574,13 +520,8 @@
 
 // static
 MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
-                                       Handle<Object> rhs, Strength strength) {
+                                       Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -591,13 +532,8 @@
 
 // static
 MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
-                                      Handle<Object> rhs, Strength strength) {
+                                      Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -608,13 +544,8 @@
 
 // static
 MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
-                                       Handle<Object> rhs, Strength strength) {
+                                       Handle<Object> rhs) {
   if (!lhs->IsNumber() || !rhs->IsNumber()) {
-    if (is_strong(strength)) {
-      THROW_NEW_ERROR(isolate,
-                      NewTypeError(MessageTemplate::kStrongImplicitConversion),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
     ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
   }
@@ -777,8 +708,7 @@
 
 
 // static
-MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
-                                        LanguageMode language_mode) {
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
       case LookupIterator::NOT_FOUND:
@@ -786,8 +716,7 @@
         UNREACHABLE();
       case LookupIterator::JSPROXY:
         return JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
-                                    it->GetName(), it->GetReceiver(),
-                                    language_mode);
+                                    it->GetName(), it->GetReceiver());
       case LookupIterator::INTERCEPTOR: {
         bool done;
         Handle<Object> result;
@@ -801,14 +730,14 @@
         if (it->HasAccess()) break;
         return JSObject::GetPropertyWithFailedAccessCheck(it);
       case LookupIterator::ACCESSOR:
-        return GetPropertyWithAccessor(it, language_mode);
+        return GetPropertyWithAccessor(it);
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        return ReadAbsentProperty(it, language_mode);
+        return ReadAbsentProperty(it);
       case LookupIterator::DATA:
         return it->GetDataValue();
     }
   }
-  return ReadAbsentProperty(it, language_mode);
+  return ReadAbsentProperty(it);
 }
 
 
@@ -827,8 +756,7 @@
 MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
                                          Handle<JSProxy> proxy,
                                          Handle<Name> name,
-                                         Handle<Object> receiver,
-                                         LanguageMode language_mode) {
+                                         Handle<Object> receiver) {
   if (receiver->IsJSGlobalObject()) {
     THROW_NEW_ERROR(
         isolate,
@@ -861,7 +789,7 @@
     // 7.a Return target.[[Get]](P, Receiver).
     LookupIterator it =
         LookupIterator::PropertyOrElement(isolate, receiver, name, target);
-    return Object::GetProperty(&it, language_mode);
+    return Object::GetProperty(&it);
   }
   // 8. Let trapResult be ? Call(trap, handler, «target, P, Receiver»).
   Handle<Object> trap_result;
@@ -934,9 +862,8 @@
         it->NotFound();
         return it->isolate()->factory()->undefined_value();
       case LookupIterator::ACCESSOR:
-        // TODO(verwaest): For now this doesn't call into
-        // ExecutableAccessorInfo, since clients don't need it. Update once
-        // relevant.
+        // TODO(verwaest): For now this doesn't call into AccessorInfo, since
+        // clients don't need it. Update once relevant.
         it->NotFound();
         return it->isolate()->factory()->undefined_value();
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1020,9 +947,10 @@
   if (recv_type->IsUndefined()) return receiver;
   FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
   // Check the receiver.
-  for (PrototypeIterator iter(isolate, receiver,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+  for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
+                              PrototypeIterator::START_AT_RECEIVER,
+                              PrototypeIterator::END_AT_NON_HIDDEN);
+       !iter.IsAtEnd(); iter.Advance()) {
     if (signature->IsTemplateFor(iter.GetCurrent())) return iter.GetCurrent();
   }
   return isolate->heap()->null_value();
@@ -1058,7 +986,8 @@
 
 Handle<FixedArray> JSObject::EnsureWritableFastElements(
     Handle<JSObject> object) {
-  DCHECK(object->HasFastSmiOrObjectElements());
+  DCHECK(object->HasFastSmiOrObjectElements() ||
+         object->HasFastStringWrapperElements());
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -1096,7 +1025,7 @@
                              Object);
   // 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
   if (trap->IsUndefined()) {
-    return Object::GetPrototype(isolate, target);
+    return JSReceiver::GetPrototype(isolate, target);
   }
   // 7. Let handlerProto be ? Call(trap, handler, «target»).
   Handle<Object> argv[] = {target};
@@ -1118,7 +1047,7 @@
   // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
   Handle<Object> target_proto;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, target_proto,
-                             Object::GetPrototype(isolate, target), Object);
+                             JSReceiver::GetPrototype(isolate, target), Object);
   // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError.
   if (!handler_proto->SameValue(*target_proto)) {
     THROW_NEW_ERROR(
@@ -1130,9 +1059,7 @@
   return handler_proto;
 }
 
-
-MaybeHandle<Object> Object::GetPropertyWithAccessor(
-    LookupIterator* it, LanguageMode language_mode) {
+MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
   Isolate* isolate = it->isolate();
   Handle<Object> structure = it->GetAccessors();
   Handle<Object> receiver = it->GetReceiver();
@@ -1145,8 +1072,7 @@
   if (structure->IsAccessorInfo()) {
     Handle<JSObject> holder = it->GetHolder<JSObject>();
     Handle<Name> name = it->GetName();
-    Handle<ExecutableAccessorInfo> info =
-        Handle<ExecutableAccessorInfo>::cast(structure);
+    Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
     if (!info->IsCompatibleReceiver(*receiver)) {
       THROW_NEW_ERROR(isolate,
                       NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
@@ -1159,11 +1085,12 @@
     if (call_fun == nullptr) return isolate->factory()->undefined_value();
 
     LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
-    PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
+    PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+                                   Object::DONT_THROW);
     v8::Local<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(name));
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (result.IsEmpty()) {
-      return ReadAbsentProperty(isolate, receiver, name, language_mode);
+      return ReadAbsentProperty(isolate, receiver, name);
     }
     Handle<Object> return_value = v8::Utils::OpenHandle(*result);
     return_value->VerifyApiCallResultType();
@@ -1173,13 +1100,24 @@
 
   // Regular accessor.
   Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
-  if (getter->IsCallable()) {
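+  // An API accessor that was never instantiated is still a
+  // FunctionTemplateInfo here; invoke it directly rather than first
+  // instantiating a JSFunction for it.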
+  if (getter->IsFunctionTemplateInfo()) {
+    auto result = Builtins::InvokeApiFunction(
+        Handle<FunctionTemplateInfo>::cast(getter), receiver, 0, nullptr);
+    if (isolate->has_pending_exception()) {
+      return MaybeHandle<Object>();
+    }
+    Handle<Object> return_value;
+    if (result.ToHandle(&return_value)) {
+      return_value->VerifyApiCallResultType();
+      return handle(*return_value, isolate);
+    }
+  } else if (getter->IsCallable()) {
     // TODO(rossberg): nicer would be to cast to some JSCallable here...
     return Object::GetPropertyWithDefinedGetter(
         receiver, Handle<JSReceiver>::cast(getter));
   }
   // Getter is not a function.
-  return ReadAbsentProperty(isolate, receiver, it->GetName(), language_mode);
+  return ReadAbsentProperty(isolate, receiver, it->GetName());
 }
 
 
@@ -1192,7 +1130,6 @@
       ->IsTemplateFor(*map);
 }
 
-
 Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
                                             Handle<Object> value,
                                             ShouldThrow should_throw) {
@@ -1205,11 +1142,10 @@
   DCHECK(!structure->IsForeign());
 
   // API style callbacks.
-  if (structure->IsExecutableAccessorInfo()) {
+  if (structure->IsAccessorInfo()) {
     Handle<JSObject> holder = it->GetHolder<JSObject>();
     Handle<Name> name = it->GetName();
-    Handle<ExecutableAccessorInfo> info =
-        Handle<ExecutableAccessorInfo>::cast(structure);
+    Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
     if (!info->IsCompatibleReceiver(*receiver)) {
       isolate->Throw(*isolate->factory()->NewTypeError(
           MessageTemplate::kIncompatibleMethodReceiver, name, receiver));
@@ -1218,14 +1154,14 @@
 
     v8::AccessorNameSetterCallback call_fun =
         v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
+    // TODO(verwaest): We should not get here anymore once all AccessorInfos are
+    // marked as special_data_property. They cannot both be writable and not
+    // have a setter.
     if (call_fun == nullptr) return Just(true);
-    // TODO(verwaest): Shouldn't this case be unreachable (at least in the
-    // long run?) Should we have ExecutableAccessorPairs with missing setter
-    // that are "writable"? If they aren't writable, shouldn't we have bailed
-    // out already earlier?
 
     LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
-    PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
+    PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+                                   should_throw);
     args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
     RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
     return Just(true);
@@ -1233,7 +1169,16 @@
 
   // Regular accessor.
   Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
-  if (setter->IsCallable()) {
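+  // As for getters above, an uninstantiated API setter is invoked directly.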
+  if (setter->IsFunctionTemplateInfo()) {
+    Handle<Object> argv[] = {value};
+    auto result =
+        Builtins::InvokeApiFunction(Handle<FunctionTemplateInfo>::cast(setter),
+                                    receiver, arraysize(argv), argv);
+    if (isolate->has_pending_exception()) {
+      return Nothing<bool>();
+    }
+    return Just(true);
+  } else if (setter->IsCallable()) {
     // TODO(rossberg): nicer would be to cast to some JSCallable here...
     return SetPropertyWithDefinedSetter(
         receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
@@ -1322,7 +1267,7 @@
   Handle<JSObject> checked = it->GetHolder<JSObject>();
   while (AllCanRead(it)) {
     if (it->state() == LookupIterator::ACCESSOR) {
-      return GetPropertyWithAccessor(it, SLOPPY);
+      return GetPropertyWithAccessor(it);
     }
     DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
     bool done;
@@ -1350,7 +1295,7 @@
   Handle<JSObject> checked = it->GetHolder<JSObject>();
   while (AllCanRead(it)) {
     if (it->state() == LookupIterator::ACCESSOR) {
-      return Just(it->property_details().attributes());
+      return Just(it->property_attributes());
     }
     DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
     auto result = GetPropertyAttributesWithInterceptor(it);
@@ -1436,14 +1381,16 @@
   }
 }
 
-
-Maybe<bool> Object::HasInPrototypeChain(Isolate* isolate, Handle<Object> object,
-                                        Handle<Object> proto) {
+Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
+                                            Handle<JSReceiver> object,
+                                            Handle<Object> proto) {
   PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
   while (true) {
     if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
     if (iter.IsAtEnd()) return Just(false);
-    if (iter.IsAtEnd(proto)) return Just(true);
+    if (PrototypeIterator::GetCurrent(iter).is_identical_to(proto)) {
+      return Just(true);
+    }
   }
 }
 
@@ -1544,25 +1491,22 @@
   if (IsString() && other->IsString()) {
     return String::cast(this)->Equals(String::cast(other));
   }
-  if (IsSimd128Value() && other->IsSimd128Value()) {
-    if (IsFloat32x4() && other->IsFloat32x4()) {
-      Float32x4* a = Float32x4::cast(this);
-      Float32x4* b = Float32x4::cast(other);
-      for (int i = 0; i < 4; i++) {
-        float x = a->get_lane(i);
-        float y = b->get_lane(i);
-        // Implements the ES5 SameValue operation for floating point types.
-        // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
-        if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
-        if (std::signbit(x) != std::signbit(y)) return false;
-      }
-      return true;
-    } else {
-      Simd128Value* a = Simd128Value::cast(this);
-      Simd128Value* b = Simd128Value::cast(other);
-      return a->map()->instance_type() == b->map()->instance_type() &&
-             a->BitwiseEquals(b);
+  if (IsFloat32x4() && other->IsFloat32x4()) {
+    Float32x4* a = Float32x4::cast(this);
+    Float32x4* b = Float32x4::cast(other);
+    for (int i = 0; i < 4; i++) {
+      float x = a->get_lane(i);
+      float y = b->get_lane(i);
+      // Implements the ES5 SameValue operation for floating point types.
+      // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
+      if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+      if (std::signbit(x) != std::signbit(y)) return false;
     }
+    return true;
+  } else if (IsSimd128Value() && other->IsSimd128Value()) {
+    Simd128Value* a = Simd128Value::cast(this);
+    Simd128Value* b = Simd128Value::cast(other);
+    return a->map() == b->map() && a->BitwiseEquals(b);
   }
   return false;
 }
@@ -1583,25 +1527,22 @@
   if (IsString() && other->IsString()) {
     return String::cast(this)->Equals(String::cast(other));
   }
-  if (IsSimd128Value() && other->IsSimd128Value()) {
-    if (IsFloat32x4() && other->IsFloat32x4()) {
-      Float32x4* a = Float32x4::cast(this);
-      Float32x4* b = Float32x4::cast(other);
-      for (int i = 0; i < 4; i++) {
-        float x = a->get_lane(i);
-        float y = b->get_lane(i);
-        // Implements the ES6 SameValueZero operation for floating point types.
-        // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
-        if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
-        // SameValueZero doesn't distinguish between 0 and -0.
-      }
-      return true;
-    } else {
-      Simd128Value* a = Simd128Value::cast(this);
-      Simd128Value* b = Simd128Value::cast(other);
-      return a->map()->instance_type() == b->map()->instance_type() &&
-             a->BitwiseEquals(b);
+  if (IsFloat32x4() && other->IsFloat32x4()) {
+    Float32x4* a = Float32x4::cast(this);
+    Float32x4* b = Float32x4::cast(other);
+    for (int i = 0; i < 4; i++) {
+      float x = a->get_lane(i);
+      float y = b->get_lane(i);
+      // Implements the ES6 SameValueZero operation for floating point types.
+      // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
+      if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+      // SameValueZero doesn't distinguish between 0 and -0.
     }
+    return true;
+  } else if (IsSimd128Value() && other->IsSimd128Value()) {
+    Simd128Value* a = Simd128Value::cast(this);
+    Simd128Value* b = Simd128Value::cast(other);
+    return a->map() == b->map() && a->BitwiseEquals(b);
   }
   return false;
 }
@@ -1610,8 +1551,14 @@
 MaybeHandle<Object> Object::ArraySpeciesConstructor(
     Isolate* isolate, Handle<Object> original_array) {
   Handle<Context> native_context = isolate->native_context();
+  Handle<Object> default_species = isolate->array_function();
   if (!FLAG_harmony_species) {
-    return Handle<Object>(native_context->array_function(), isolate);
+    return default_species;
+  }
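+  // Fast path: a plain JSArray (not a subclass instance) whose @@species
+  // protector is still intact always answers with the Array constructor.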
+  if (original_array->IsJSArray() &&
+      Handle<JSReceiver>::cast(original_array)->map()->new_target_is_base() &&
+      isolate->IsArraySpeciesLookupChainIntact()) {
+    return default_species;
   }
   Handle<Object> constructor = isolate->factory()->undefined_value();
   Maybe<bool> is_array = Object::IsArray(original_array);
@@ -1645,7 +1592,7 @@
     }
   }
   if (constructor->IsUndefined()) {
-    return Handle<Object>(native_context->array_function(), isolate);
+    return default_species;
   } else {
     if (!constructor->IsConstructor()) {
       THROW_NEW_ERROR(isolate,
@@ -2104,17 +2051,12 @@
   os << "]\n";
 }
 
-
-void Map::PrintGeneralization(FILE* file,
-                              const char* reason,
-                              int modify_index,
-                              int split,
-                              int descriptors,
-                              bool constant_to_field,
-                              Representation old_representation,
-                              Representation new_representation,
-                              HeapType* old_field_type,
-                              HeapType* new_field_type) {
+void Map::PrintGeneralization(
+    FILE* file, const char* reason, int modify_index, int split,
+    int descriptors, bool constant_to_field, Representation old_representation,
+    Representation new_representation, MaybeHandle<FieldType> old_field_type,
+    MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
+    MaybeHandle<Object> new_value) {
   OFStream os(file);
   os << "[generalizing]";
   Name* name = instance_descriptors()->GetKey(modify_index);
@@ -2128,11 +2070,19 @@
     os << "c";
   } else {
     os << old_representation.Mnemonic() << "{";
-    old_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+    if (old_field_type.is_null()) {
+      os << Brief(*(old_value.ToHandleChecked()));
+    } else {
+      old_field_type.ToHandleChecked()->PrintTo(os);
+    }
     os << "}";
   }
   os << "->" << new_representation.Mnemonic() << "{";
-  new_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+  if (new_field_type.is_null()) {
+    os << Brief(*(new_value.ToHandleChecked()));
+  } else {
+    new_field_type.ToHandleChecked()->PrintTo(os);
+  }
   os << "} (";
   if (strlen(reason) > 0) {
     os << reason;
@@ -2569,33 +2519,31 @@
 
 
 Context* JSReceiver::GetCreationContext() {
-  if (IsJSBoundFunction()) {
-    return JSBoundFunction::cast(this)->creation_context();
+  JSReceiver* receiver = this;
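+  // Bound functions have no creation context of their own; unwrap to the
+  // innermost bound target function.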
+  while (receiver->IsJSBoundFunction()) {
+    receiver = JSBoundFunction::cast(receiver)->bound_target_function();
   }
-  Object* constructor = map()->GetConstructor();
+  Object* constructor = receiver->map()->GetConstructor();
   JSFunction* function;
   if (constructor->IsJSFunction()) {
     function = JSFunction::cast(constructor);
   } else {
     // Functions have null as a constructor,
     // but any JSFunction knows its context immediately.
-    CHECK(IsJSFunction());
-    function = JSFunction::cast(this);
+    CHECK(receiver->IsJSFunction());
+    function = JSFunction::cast(receiver);
   }
 
   return function->context()->native_context();
 }
 
-
-static Handle<Object> WrapType(Handle<HeapType> type) {
-  if (type->IsClass()) return Map::WeakCellForMap(type->AsClass()->Map());
+static Handle<Object> WrapType(Handle<FieldType> type) {
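+  // Class field types reference their Map through a WeakCell so that the
+  // descriptor array does not keep the map alive.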
+  if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
   return type;
 }
 
-
-MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
-                                    Handle<Name> name,
-                                    Handle<HeapType> type,
+MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
+                                    Handle<FieldType> type,
                                     PropertyAttributes attributes,
                                     Representation representation,
                                     TransitionFlag flag) {
@@ -2615,7 +2563,7 @@
 
   if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
     representation = Representation::Tagged();
-    type = HeapType::Any(isolate);
+    type = FieldType::Any(isolate);
   }
 
   Handle<Object> wrapped_type(WrapType(type));
@@ -2787,59 +2735,7 @@
   }
 }
 
-
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
-                            int expected_additional_properties) {
-  if (object->map() == *new_map) return;
-  // If this object is a prototype (the callee will check), invalidate any
-  // prototype chains involving it.
-  InvalidatePrototypeChains(object->map());
-  Handle<Map> old_map(object->map());
-
-  // If the map was registered with its prototype before, ensure that it
-  // registers with its new prototype now. This preserves the invariant that
-  // when a map on a prototype chain is registered with its prototype, then
-  // all prototypes further up the chain are also registered with their
-  // respective prototypes.
-  UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
-
-  if (object->HasFastProperties()) {
-    if (!new_map->is_dictionary_map()) {
-      MigrateFastToFast(object, new_map);
-      if (old_map->is_prototype_map()) {
-        DCHECK(!old_map->is_stable());
-        DCHECK(new_map->is_stable());
-        // Clear out the old descriptor array to avoid problems to sharing
-        // the descriptor array without using an explicit.
-        old_map->InitializeDescriptors(
-            old_map->GetHeap()->empty_descriptor_array(),
-            LayoutDescriptor::FastPointerLayout());
-        // Ensure that no transition was inserted for prototype migrations.
-        DCHECK_EQ(0, TransitionArray::NumberOfTransitions(
-                         old_map->raw_transitions()));
-        DCHECK(new_map->GetBackPointer()->IsUndefined());
-      }
-    } else {
-      MigrateFastToSlow(object, new_map, expected_additional_properties);
-    }
-  } else {
-    // For slow-to-fast migrations JSObject::MigrateSlowToFast()
-    // must be used instead.
-    CHECK(new_map->is_dictionary_map());
-
-    // Slow-to-slow migration is trivial.
-    object->set_map(*new_map);
-  }
-
-  // Careful: Don't allocate here!
-  // For some callers of this method, |object| might be in an inconsistent
-  // state now: the new map might have a new elements_kind, but the object's
-  // elements pointer hasn't been updated yet. Callers will fix this, but in
-  // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
-  // When adding code here, add a DisallowHeapAllocation too.
-}
-
-
+namespace {
 // To migrate a fast instance to a fast map:
 // - First check whether the instance needs to be rewritten. If not, simply
 //   change the map.
@@ -2855,9 +2751,71 @@
 //     to temporarily store the inobject properties.
 //   * If there are properties left in the backing store, install the backing
 //     store.
-void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
+void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   Isolate* isolate = object->GetIsolate();
   Handle<Map> old_map(object->map());
+  // In case of a regular transition, i.e. new_map is a direct transition
+  // child of old_map and adds at most one named property.
+  if (new_map->GetBackPointer() == *old_map) {
+    // If the map does not add named properties, simply set the map.
+    if (old_map->NumberOfOwnDescriptors() ==
+        new_map->NumberOfOwnDescriptors()) {
+      object->synchronized_set_map(*new_map);
+      return;
+    }
+
+    PropertyDetails details = new_map->GetLastDescriptorDetails();
+    // Either new_map adds an kDescriptor property, or a kField property for
+    // which there is still space, and which does not require a mutable double
+    // box (an out-of-object double).
+    if (details.location() == kDescriptor ||
+        (old_map->unused_property_fields() > 0 &&
+         ((FLAG_unbox_double_fields && object->properties()->length() == 0) ||
+          !details.representation().IsDouble()))) {
+      object->synchronized_set_map(*new_map);
+      return;
+    }
+
+    // If there is still space in the object, we need to allocate a mutable
+    // double box.
+    if (old_map->unused_property_fields() > 0) {
+      FieldIndex index =
+          FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+      DCHECK(details.representation().IsDouble());
+      DCHECK(!new_map->IsUnboxedDoubleField(index));
+      Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+      object->RawFastPropertyAtPut(index, *value);
+      object->synchronized_set_map(*new_map);
+      return;
+    }
+
+    // This migration is a transition from a map that has run out of property
+    // space. Extend the backing store.
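+    // Grow by one slot for the field being added plus the slack the new map
+    // still carries.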
+    int grow_by = new_map->unused_property_fields() + 1;
+    Handle<FixedArray> old_storage = handle(object->properties(), isolate);
+    Handle<FixedArray> new_storage =
+        isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by);
+
+    // Properly initialize newly added property.
+    Handle<Object> value;
+    if (details.representation().IsDouble()) {
+      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+    } else {
+      value = isolate->factory()->uninitialized_value();
+    }
+    DCHECK_EQ(DATA, details.type());
+    int target_index = details.field_index() - new_map->GetInObjectProperties();
+    DCHECK(target_index >= 0);  // Must be a backing store index.
+    new_storage->set(target_index, *value);
+
+    // From here on we cannot fail and we shouldn't GC anymore.
+    DisallowHeapAllocation no_allocation;
+
+    // Set the new property value and do the map transition.
+    object->set_properties(*new_storage);
+    object->synchronized_set_map(*new_map);
+    return;
+  }
+
   int old_number_of_fields;
   int number_of_fields = new_map->NumberOfFields();
   int inobject = new_map->GetInObjectProperties();
@@ -2874,53 +2832,6 @@
   int total_size = number_of_fields + unused;
   int external = total_size - inobject;
 
-  if (number_of_fields != old_number_of_fields &&
-      new_map->GetBackPointer() == *old_map) {
-    PropertyDetails details = new_map->GetLastDescriptorDetails();
-
-    if (old_map->unused_property_fields() > 0) {
-      if (details.representation().IsDouble()) {
-        FieldIndex index =
-            FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
-        if (new_map->IsUnboxedDoubleField(index)) {
-          object->RawFastDoublePropertyAtPut(index, 0);
-        } else {
-          Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
-          object->RawFastPropertyAtPut(index, *value);
-        }
-      }
-      object->synchronized_set_map(*new_map);
-      return;
-    }
-
-    DCHECK(number_of_fields == old_number_of_fields + 1);
-    // This migration is a transition from a map that has run out of property
-    // space. Therefore it could be done by extending the backing store.
-    int grow_by = external - object->properties()->length();
-    Handle<FixedArray> old_storage = handle(object->properties(), isolate);
-    Handle<FixedArray> new_storage =
-        isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by);
-
-    // Properly initialize newly added property.
-    Handle<Object> value;
-    if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
-    } else {
-      value = isolate->factory()->uninitialized_value();
-    }
-    DCHECK(details.type() == DATA);
-    int target_index = details.field_index() - inobject;
-    DCHECK(target_index >= 0);  // Must be a backing store index.
-    new_storage->set(target_index, *value);
-
-    // From here on we cannot fail and we shouldn't GC anymore.
-    DisallowHeapAllocation no_allocation;
-
-    // Set the new property value and do the map transition.
-    object->set_properties(*new_storage);
-    object->synchronized_set_map(*new_map);
-    return;
-  }
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
 
   Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
@@ -2994,6 +2905,8 @@
   // From here on we cannot fail and we shouldn't GC anymore.
   DisallowHeapAllocation no_allocation;
 
+  Heap* heap = isolate->heap();
+
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
   int limit = Min(inobject, number_of_fields);
@@ -3006,12 +2919,16 @@
       DCHECK(value->IsMutableHeapNumber());
       object->RawFastDoublePropertyAtPut(index,
                                          HeapNumber::cast(value)->value());
+      if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
+        // Transition from tagged to untagged slot.
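+        // The old tagged value may have left an entry in the store buffer;
+        // clear it so the GC does not treat the raw double bits as a pointer.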
+        heap->ClearRecordedSlot(*object,
+                                HeapObject::RawField(*object, index.offset()));
+      }
     } else {
       object->RawFastPropertyAtPut(index, value);
     }
   }
 
-  Heap* heap = isolate->heap();
 
   // If there are properties in the new backing store, trim it to the correct
   // size and install the backing store into the object.
@@ -3038,6 +2955,173 @@
   object->synchronized_set_map(*new_map);
 }
 
+void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
+                       int expected_additional_properties) {
+  // The global object is always normalized.
+  DCHECK(!object->IsJSGlobalObject());
+  // JSGlobalProxy must never be normalized
+  DCHECK(!object->IsJSGlobalProxy());
+
+  Isolate* isolate = object->GetIsolate();
+  HandleScope scope(isolate);
+  Handle<Map> map(object->map());
+
+  // Allocate new content.
+  int real_size = map->NumberOfOwnDescriptors();
+  int property_count = real_size;
+  if (expected_additional_properties > 0) {
+    property_count += expected_additional_properties;
+  } else {
+    property_count += 2;  // Make space for two more properties.
+  }
+  Handle<NameDictionary> dictionary =
+      NameDictionary::New(isolate, property_count);
+
+  Handle<DescriptorArray> descs(map->instance_descriptors());
+  for (int i = 0; i < real_size; i++) {
+    PropertyDetails details = descs->GetDetails(i);
+    Handle<Name> key(descs->GetKey(i));
+    switch (details.type()) {
+      case DATA_CONSTANT: {
+        Handle<Object> value(descs->GetConstant(i), isolate);
+        PropertyDetails d(details.attributes(), DATA, i + 1,
+                          PropertyCellType::kNoCell);
+        dictionary = NameDictionary::Add(dictionary, key, value, d);
+        break;
+      }
+      case DATA: {
+        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+        Handle<Object> value;
+        if (object->IsUnboxedDoubleField(index)) {
+          double old_value = object->RawFastDoublePropertyAt(index);
+          value = isolate->factory()->NewHeapNumber(old_value);
+        } else {
+          value = handle(object->RawFastPropertyAt(index), isolate);
+          if (details.representation().IsDouble()) {
+            DCHECK(value->IsMutableHeapNumber());
+            Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
+            value = isolate->factory()->NewHeapNumber(old->value());
+          }
+        }
+        PropertyDetails d(details.attributes(), DATA, i + 1,
+                          PropertyCellType::kNoCell);
+        dictionary = NameDictionary::Add(dictionary, key, value, d);
+        break;
+      }
+      case ACCESSOR: {
+        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+        Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+                          PropertyCellType::kNoCell);
+        dictionary = NameDictionary::Add(dictionary, key, value, d);
+        break;
+      }
+      case ACCESSOR_CONSTANT: {
+        Handle<Object> value(descs->GetCallbacksObject(i), isolate);
+        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+                          PropertyCellType::kNoCell);
+        dictionary = NameDictionary::Add(dictionary, key, value, d);
+        break;
+      }
+    }
+  }
+
+  // Copy the next enumeration index from instance descriptor.
+  dictionary->SetNextEnumerationIndex(real_size + 1);
+
+  // From here on we cannot fail and we shouldn't GC anymore.
+  DisallowHeapAllocation no_allocation;
+
+  // Resize the object in the heap if necessary.
+  int new_instance_size = new_map->instance_size();
+  int instance_size_delta = map->instance_size() - new_instance_size;
+  DCHECK(instance_size_delta >= 0);
+
+  if (instance_size_delta > 0) {
+    Heap* heap = isolate->heap();
+    heap->CreateFillerObjectAt(object->address() + new_instance_size,
+                               instance_size_delta);
+    heap->AdjustLiveBytes(*object, -instance_size_delta,
+                          Heap::CONCURRENT_TO_SWEEPER);
+  }
+
+  // We are storing the new map using release store after creating a filler for
+  // the left-over space to avoid races with the sweeper thread.
+  object->synchronized_set_map(*new_map);
+
+  object->set_properties(*dictionary);
+
+  // Ensure that in-object space of slow-mode object does not contain random
+  // garbage.
+  int inobject_properties = new_map->GetInObjectProperties();
+  for (int i = 0; i < inobject_properties; i++) {
+    FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+    object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+  }
+
+  isolate->counters()->props_to_dictionary()->Increment();
+
+#ifdef DEBUG
+  if (FLAG_trace_normalization) {
+    OFStream os(stdout);
+    os << "Object properties have been normalized:\n";
+    object->Print(os);
+  }
+#endif
+}
+
+}  // namespace
+
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+                            int expected_additional_properties) {
+  if (object->map() == *new_map) return;
+  Handle<Map> old_map(object->map());
+  if (old_map->is_prototype_map()) {
+    // This object is a prototype; invalidate any prototype chains
+    // involving it.
+    InvalidatePrototypeChains(object->map());
+
+    // If the map was registered with its prototype before, ensure that it
+    // registers with its new prototype now. This preserves the invariant that
+    // when a map on a prototype chain is registered with its prototype, then
+    // all prototypes further up the chain are also registered with their
+    // respective prototypes.
+    UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
+  }
+
+  if (old_map->is_dictionary_map()) {
+    // For slow-to-fast migrations JSObject::MigrateSlowToFast()
+    // must be used instead.
+    CHECK(new_map->is_dictionary_map());
+
+    // Slow-to-slow migration is trivial.
+    object->set_map(*new_map);
+  } else if (!new_map->is_dictionary_map()) {
+    MigrateFastToFast(object, new_map);
+    if (old_map->is_prototype_map()) {
+      DCHECK(!old_map->is_stable());
+      DCHECK(new_map->is_stable());
+      // Clear out the old descriptor array to avoid problems caused by
+      // sharing the descriptor array without explicitly copying it.
+      old_map->InitializeDescriptors(
+          old_map->GetHeap()->empty_descriptor_array(),
+          LayoutDescriptor::FastPointerLayout());
+      // Ensure that no transition was inserted for prototype migrations.
+      DCHECK_EQ(
+          0, TransitionArray::NumberOfTransitions(old_map->raw_transitions()));
+      DCHECK(new_map->GetBackPointer()->IsUndefined());
+    }
+  } else {
+    MigrateFastToSlow(object, new_map, expected_additional_properties);
+  }
+
+  // Careful: Don't allocate here!
+  // For some callers of this method, |object| might be in an inconsistent
+  // state now: the new map might have a new elements_kind, but the object's
+  // elements pointer hasn't been updated yet. Callers will fix this, but in
+  // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
+  // When adding code here, add a DisallowHeapAllocation too.
+}
 
 int Map::NumberOfFields() {
   DescriptorArray* descriptors = instance_descriptors();
@@ -3061,7 +3145,7 @@
   for (int i = 0; i < number_of_own_descriptors; i++) {
     descriptors->SetRepresentation(i, Representation::Tagged());
     if (descriptors->GetDetails(i).type() == DATA) {
-      descriptors->SetValue(i, HeapType::Any());
+      descriptors->SetValue(i, FieldType::Any());
     }
   }
 
@@ -3093,16 +3177,18 @@
     }
 
     if (FLAG_trace_generalization) {
-      HeapType* field_type =
-          (details.type() == DATA)
-              ? map->instance_descriptors()->GetFieldType(modify_index)
-              : NULL;
+      MaybeHandle<FieldType> field_type = FieldType::None(isolate);
+      if (details.type() == DATA) {
+        field_type = handle(
+            map->instance_descriptors()->GetFieldType(modify_index), isolate);
+      }
       map->PrintGeneralization(
           stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
           new_map->NumberOfOwnDescriptors(),
           details.type() == DATA_CONSTANT && store_mode == FORCE_FIELD,
           details.representation(), Representation::Tagged(), field_type,
-          HeapType::Any());
+          MaybeHandle<Object>(), FieldType::Any(isolate),
+          MaybeHandle<Object>());
     }
   }
   return new_map;
@@ -3195,7 +3281,7 @@
     if (!details.representation().Equals(next_details.representation())) break;
 
     if (next_details.location() == kField) {
-      HeapType* next_field_type = next_descriptors->GetFieldType(i);
+      FieldType* next_field_type = next_descriptors->GetFieldType(i);
       if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
         break;
       }
@@ -3251,42 +3337,41 @@
   instance_descriptors()->Replace(descriptor, &d);
 }
 
-
-bool FieldTypeIsCleared(Representation rep, HeapType* type) {
-  return type->Is(HeapType::None()) && rep.IsHeapObject();
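+// A field type is "cleared" when the stored type is None even though the
+// representation still expects a heap object: the type information was lost
+// and must be treated conservatively.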
+bool FieldTypeIsCleared(Representation rep, FieldType* type) {
+  return type->IsNone() && rep.IsHeapObject();
 }
 
 
 // static
-Handle<HeapType> Map::GeneralizeFieldType(Representation rep1,
-                                          Handle<HeapType> type1,
-                                          Representation rep2,
-                                          Handle<HeapType> type2,
-                                          Isolate* isolate) {
+Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
+                                           Handle<FieldType> type1,
+                                           Representation rep2,
+                                           Handle<FieldType> type2,
+                                           Isolate* isolate) {
   // Cleared field types need special treatment. They represent lost knowledge,
   // so we must be conservative: their generalization with any other type
   // is "Any".
   if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
-    return HeapType::Any(isolate);
+    return FieldType::Any(isolate);
   }
   if (type1->NowIs(type2)) return type2;
   if (type2->NowIs(type1)) return type1;
-  return HeapType::Any(isolate);
+  return FieldType::Any(isolate);
 }
 
 
 // static
 void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
                               Representation new_representation,
-                              Handle<HeapType> new_field_type) {
+                              Handle<FieldType> new_field_type) {
   Isolate* isolate = map->GetIsolate();
 
   // Check if we actually need to generalize the field type at all.
   Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
   Representation old_representation =
       old_descriptors->GetDetails(modify_index).representation();
-  Handle<HeapType> old_field_type(old_descriptors->GetFieldType(modify_index),
-                                  isolate);
+  Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
+                                   isolate);
 
   if (old_representation.Equals(new_representation) &&
       !FieldTypeIsCleared(new_representation, *new_field_type) &&
@@ -3320,20 +3405,16 @@
 
   if (FLAG_trace_generalization) {
     map->PrintGeneralization(
-        stdout, "field type generalization",
-        modify_index, map->NumberOfOwnDescriptors(),
-        map->NumberOfOwnDescriptors(), false,
-        details.representation(), details.representation(),
-        *old_field_type, *new_field_type);
+        stdout, "field type generalization", modify_index,
+        map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
+        details.representation(), details.representation(), old_field_type,
+        MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
   }
 }
 
-
-static inline Handle<HeapType> GetFieldType(Isolate* isolate,
-                                            Handle<DescriptorArray> descriptors,
-                                            int descriptor,
-                                            PropertyLocation location,
-                                            Representation representation) {
+static inline Handle<FieldType> GetFieldType(
+    Isolate* isolate, Handle<DescriptorArray> descriptors, int descriptor,
+    PropertyLocation location, Representation representation) {
 #ifdef DEBUG
   PropertyDetails details = descriptors->GetDetails(descriptor);
   DCHECK_EQ(kData, details.kind());
@@ -3378,7 +3459,7 @@
                                      PropertyKind new_kind,
                                      PropertyAttributes new_attributes,
                                      Representation new_representation,
-                                     Handle<HeapType> new_field_type,
+                                     Handle<FieldType> new_field_type,
                                      StoreMode store_mode) {
   DCHECK_NE(kAccessor, new_kind);  // TODO(ishell): not supported yet.
   DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
@@ -3407,8 +3488,9 @@
             stdout, "uninitialized field", modify_index,
             old_map->NumberOfOwnDescriptors(),
             old_map->NumberOfOwnDescriptors(), false, old_representation,
-            new_representation, old_descriptors->GetFieldType(modify_index),
-            *new_field_type);
+            new_representation,
+            handle(old_descriptors->GetFieldType(modify_index), isolate),
+            MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
       }
       Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
 
@@ -3524,11 +3606,11 @@
     PropertyLocation tmp_location = tmp_details.location();
     if (tmp_location == kField) {
       if (next_kind == kData) {
-        Handle<HeapType> next_field_type;
+        Handle<FieldType> next_field_type;
         if (modify_index == i) {
           next_field_type = new_field_type;
           if (!property_kind_reconfiguration) {
-            Handle<HeapType> old_field_type =
+            Handle<FieldType> old_field_type =
                 GetFieldType(isolate, old_descriptors, i,
                              old_details.location(), tmp_representation);
             Representation old_representation = old_details.representation();
@@ -3537,7 +3619,7 @@
                 next_field_type, isolate);
           }
         } else {
-          Handle<HeapType> old_field_type =
+          Handle<FieldType> old_field_type =
               GetFieldType(isolate, old_descriptors, i, old_details.location(),
                            tmp_representation);
           next_field_type = old_field_type;
@@ -3692,17 +3774,17 @@
 
     if (next_location == kField) {
       if (next_kind == kData) {
-        Handle<HeapType> target_field_type =
+        Handle<FieldType> target_field_type =
             GetFieldType(isolate, target_descriptors, i,
                          target_details.location(), next_representation);
 
-        Handle<HeapType> next_field_type;
+        Handle<FieldType> next_field_type;
         if (modify_index == i) {
           next_field_type = GeneralizeFieldType(
               target_details.representation(), target_field_type,
               new_representation, new_field_type, isolate);
           if (!property_kind_reconfiguration) {
-            Handle<HeapType> old_field_type =
+            Handle<FieldType> old_field_type =
                 GetFieldType(isolate, old_descriptors, i,
                              old_details.location(), next_representation);
             next_field_type = GeneralizeFieldType(
@@ -3710,7 +3792,7 @@
                 next_representation, next_field_type, isolate);
           }
         } else {
-          Handle<HeapType> old_field_type =
+          Handle<FieldType> old_field_type =
               GetFieldType(isolate, old_descriptors, i, old_details.location(),
                            next_representation);
           next_field_type = GeneralizeFieldType(
@@ -3769,11 +3851,11 @@
 
     if (next_location == kField) {
       if (next_kind == kData) {
-        Handle<HeapType> next_field_type;
+        Handle<FieldType> next_field_type;
         if (modify_index == i) {
           next_field_type = new_field_type;
           if (!property_kind_reconfiguration) {
-            Handle<HeapType> old_field_type =
+            Handle<FieldType> old_field_type =
                 GetFieldType(isolate, old_descriptors, i,
                              old_details.location(), next_representation);
             next_field_type = GeneralizeFieldType(
@@ -3781,7 +3863,7 @@
                 next_representation, next_field_type, isolate);
           }
         } else {
-          Handle<HeapType> old_field_type =
+          Handle<FieldType> old_field_type =
               GetFieldType(isolate, old_descriptors, i, old_details.location(),
                            next_representation);
           next_field_type = old_field_type;
@@ -3849,23 +3931,28 @@
   if (FLAG_trace_generalization && modify_index >= 0) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
     PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
-    Handle<HeapType> old_field_type =
-        (old_details.type() == DATA)
-            ? handle(old_descriptors->GetFieldType(modify_index), isolate)
-            : HeapType::Constant(
-                  handle(old_descriptors->GetValue(modify_index), isolate),
-                  isolate);
-    Handle<HeapType> new_field_type =
-        (new_details.type() == DATA)
-            ? handle(new_descriptors->GetFieldType(modify_index), isolate)
-            : HeapType::Constant(
-                  handle(new_descriptors->GetValue(modify_index), isolate),
-                  isolate);
+    MaybeHandle<FieldType> old_field_type;
+    MaybeHandle<FieldType> new_field_type;
+    MaybeHandle<Object> old_value;
+    MaybeHandle<Object> new_value;
+    if (old_details.type() == DATA) {
+      old_field_type =
+          handle(old_descriptors->GetFieldType(modify_index), isolate);
+    } else {
+      old_value = handle(old_descriptors->GetValue(modify_index), isolate);
+    }
+    if (new_details.type() == DATA) {
+      new_field_type =
+          handle(new_descriptors->GetFieldType(modify_index), isolate);
+    } else {
+      new_value = handle(new_descriptors->GetValue(modify_index), isolate);
+    }
+
     old_map->PrintGeneralization(
         stdout, "", modify_index, split_nof, old_nof,
         old_details.location() == kDescriptor && store_mode == FORCE_FIELD,
         old_details.representation(), new_details.representation(),
-        *old_field_type, *new_field_type);
+        old_field_type, old_value, new_field_type, new_value);
   }
 
   Handle<LayoutDescriptor> new_layout_descriptor =
@@ -3891,7 +3978,7 @@
     if (details.type() == DATA) {
       map = ReconfigureProperty(map, i, kData, details.attributes(),
                                 Representation::Tagged(),
-                                HeapType::Any(map->GetIsolate()), FORCE_FIELD);
+                                FieldType::Any(map->GetIsolate()), FORCE_FIELD);
     }
   }
   return map;
@@ -3940,7 +4027,7 @@
     }
     switch (new_details.type()) {
       case DATA: {
-        HeapType* new_type = new_descriptors->GetFieldType(i);
+        FieldType* new_type = new_descriptors->GetFieldType(i);
         // Cleared field types need special treatment. They represent lost
         // knowledge, so we must first generalize the new_type to "Any".
         if (FieldTypeIsCleared(new_details.representation(), new_type)) {
@@ -3948,7 +4035,7 @@
         }
         PropertyType old_property_type = old_details.type();
         if (old_property_type == DATA) {
-          HeapType* old_type = old_descriptors->GetFieldType(i);
+          FieldType* old_type = old_descriptors->GetFieldType(i);
           if (FieldTypeIsCleared(old_details.representation(), old_type) ||
               !old_type->NowIs(new_type)) {
             return MaybeHandle<Map>();
@@ -3964,8 +4051,8 @@
       }
       case ACCESSOR: {
 #ifdef DEBUG
-        HeapType* new_type = new_descriptors->GetFieldType(i);
-        DCHECK(HeapType::Any()->Is(new_type));
+        FieldType* new_type = new_descriptors->GetFieldType(i);
+        DCHECK(new_type->IsAny());
 #endif
         break;
       }
@@ -3990,12 +4077,13 @@
 Handle<Map> Map::Update(Handle<Map> map) {
   if (!map->is_deprecated()) return map;
   return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
-                             HeapType::None(map->GetIsolate()),
+                             FieldType::None(map->GetIsolate()),
                              ALLOW_IN_DESCRIPTOR);
 }
 
 
 Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
+                                                 ShouldThrow should_throw,
                                                  Handle<Object> value) {
   Isolate* isolate = it->isolate();
   // Make sure that the top context does not change when doing callbacks or
@@ -4009,7 +4097,7 @@
   Handle<JSObject> holder = it->GetHolder<JSObject>();
   v8::Local<v8::Value> result;
   PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder);
+                                 *it->GetReceiver(), *holder, should_throw);
 
   if (it->IsElement()) {
     uint32_t index = it->index();
@@ -4062,6 +4150,7 @@
                                         LanguageMode language_mode,
                                         StoreFromKeyed store_mode,
                                         bool* found) {
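+  // Give the LookupIterator a chance to invalidate any protector cells whose
+  // assumptions this property store could break.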
+  it->UpdateProtector();
   ShouldThrow should_throw =
       is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
 
@@ -4090,7 +4179,8 @@
 
       case LookupIterator::INTERCEPTOR:
         if (it->HolderIsReceiverOrHiddenPrototype()) {
-          Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+          Maybe<bool> result =
+              JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
         } else {
           Maybe<PropertyAttributes> maybe_attributes =
@@ -4155,16 +4245,12 @@
 Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
                                 LanguageMode language_mode,
                                 StoreFromKeyed store_mode) {
-  ShouldThrow should_throw =
-      is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
-  if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
-    RETURN_FAILURE(it->isolate(), should_throw,
-                   NewTypeError(MessageTemplate::kProxyPrivate));
-  }
   bool found = false;
   Maybe<bool> result =
       SetPropertyInternal(it, value, language_mode, store_mode, &found);
   if (found) return result;
+  ShouldThrow should_throw =
+      is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
   return AddDataProperty(it, value, NONE, should_throw, store_mode);
 }
 
@@ -4172,13 +4258,7 @@
 Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
                                      LanguageMode language_mode,
                                      StoreFromKeyed store_mode) {
-  ShouldThrow should_throw =
-      is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
   Isolate* isolate = it->isolate();
-  if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
-    RETURN_FAILURE(isolate, should_throw,
-                   NewTypeError(MessageTemplate::kProxyPrivate));
-  }
 
   bool found = false;
   Maybe<bool> result =
@@ -4188,6 +4268,9 @@
   // The property either doesn't exist on the holder or exists there as a data
   // property.
 
+  ShouldThrow should_throw =
+      is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+
   if (!it->GetReceiver()->IsJSReceiver()) {
     return WriteToReadOnlyProperty(it, value, should_throw);
   }
@@ -4207,14 +4290,21 @@
         }
         break;
 
-      case LookupIterator::INTEGER_INDEXED_EXOTIC:
       case LookupIterator::ACCESSOR:
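+        // API accessors (AccessorInfo) behave like data properties and are
+        // handled here; everything else falls through to
+        // RedefineIncompatibleProperty.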
+        if (own_lookup.GetAccessors()->IsAccessorInfo()) {
+          if (own_lookup.IsReadOnly()) {
+            return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
+          }
+          return JSObject::SetPropertyWithAccessor(&own_lookup, value,
+                                                   should_throw);
+        }
+      // Fall through.
+      case LookupIterator::INTEGER_INDEXED_EXOTIC:
         return RedefineIncompatibleProperty(isolate, it->GetName(), value,
                                             should_throw);
 
       case LookupIterator::DATA: {
-        PropertyDetails details = own_lookup.property_details();
-        if (details.IsReadOnly()) {
+        if (own_lookup.IsReadOnly()) {
           return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
         }
         return SetDataProperty(&own_lookup, value);
@@ -4252,28 +4342,13 @@
                                    store_mode);
 }
 
-
-MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it,
-                                               LanguageMode language_mode) {
-  if (is_strong(language_mode)) {
-    THROW_NEW_ERROR(it->isolate(),
-                    NewTypeError(MessageTemplate::kStrongPropertyAccess,
-                                 it->GetName(), it->GetReceiver()),
-                    Object);
-  }
+MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
   return it->isolate()->factory()->undefined_value();
 }
 
 MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
                                                Handle<Object> receiver,
-                                               Handle<Object> name,
-                                               LanguageMode language_mode) {
-  if (is_strong(language_mode)) {
-    THROW_NEW_ERROR(
-        isolate,
-        NewTypeError(MessageTemplate::kStrongPropertyAccess, name, receiver),
-        Object);
-  }
+                                               Handle<Object> name) {
   return isolate->factory()->undefined_value();
 }
 
@@ -4330,8 +4405,7 @@
   // Fetch before transforming the object since the encoding may become
   // incompatible with what's cached in |it|.
   bool is_observed = receiver->map()->is_observed() &&
-                     (it->IsElement() ||
-                      !it->isolate()->IsInternallyUsedPropertyName(it->name()));
+                     (it->IsElement() || !it->name()->IsPrivate());
   MaybeHandle<Object> maybe_old;
   if (is_observed) maybe_old = it->GetDataValue();
 
@@ -4341,13 +4415,6 @@
     if (!value->IsNumber() && !value->IsUndefined()) {
       ASSIGN_RETURN_ON_EXCEPTION_VALUE(
           it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
-      // ToNumber above might modify the receiver, causing the cached
-      // holder_map to mismatch the actual holder->map() after this point.
-      // Reload the map to be in consistent state. Other cached state cannot
-      // have been invalidated since typed array elements cannot be reconfigured
-      // in any way.
-      it->ReloadHolderMap();
-
       // We have to recheck the length. However, it can only change if the
       // underlying buffer was neutered, so just check that.
       if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
@@ -4427,8 +4494,11 @@
                                     PropertyAttributes attributes,
                                     ShouldThrow should_throw,
                                     StoreFromKeyed store_mode) {
-  DCHECK(!it->GetReceiver()->IsJSProxy());
   if (!it->GetReceiver()->IsJSObject()) {
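+    // Private symbols cannot be added to proxies; report kProxyPrivate rather
+    // than the generic "cannot create property" error.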
+    if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+      RETURN_FAILURE(it->isolate(), should_throw,
+                     NewTypeError(MessageTemplate::kProxyPrivate));
+    }
     return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
                                 value, should_throw);
   }
@@ -4443,8 +4513,7 @@
 
   Isolate* isolate = it->isolate();
 
-  if (!receiver->map()->is_extensible() &&
-      (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()))) {
+  if (it->ExtendingNonExtensible(receiver)) {
     RETURN_FAILURE(
         isolate, should_throw,
         NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
@@ -4477,14 +4546,13 @@
   } else {
     // Migrate to the most up-to-date map that will be able to store |value|
     // under it->name() with |attributes|.
-    it->PrepareTransitionToDataProperty(value, attributes, store_mode);
+    it->PrepareTransitionToDataProperty(receiver, value, attributes,
+                                        store_mode);
     DCHECK_EQ(LookupIterator::TRANSITION, it->state());
-    it->ApplyTransitionToDataProperty();
+    it->ApplyTransitionToDataProperty(receiver);
 
     // TODO(verwaest): Encapsulate dictionary handling better.
     if (receiver->map()->is_dictionary_map()) {
-      // TODO(verwaest): Probably should ensure this is done beforehand.
-      it->InternalizeName();
       // TODO(dcarney): just populate TransitionPropertyCell here?
       JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
     } else {
@@ -4493,8 +4561,7 @@
     }
 
     // Send the change record if there are observers.
-    if (receiver->map()->is_observed() &&
-        !isolate->IsInternallyUsedPropertyName(it->name())) {
+    if (receiver->map()->is_observed() && !it->name()->IsPrivate()) {
       RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
                                              receiver, "add", it->name(),
                                              it->factory()->the_hole_value()),
@@ -5195,8 +5262,7 @@
   Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
   DCHECK(maybe.IsJust());
   DCHECK(!it.IsFound());
-  DCHECK(object->map()->is_extensible() ||
-         it.isolate()->IsInternallyUsedPropertyName(name));
+  DCHECK(object->map()->is_extensible() || name->IsPrivate());
 #endif
   CHECK(AddDataProperty(&it, value, attributes, THROW_ON_ERROR,
                         CERTAINLY_NOT_STORE_FROM_KEYED)
@@ -5204,20 +5270,13 @@
 }
 
 
-// static
-void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
-  Handle<Object> object = v8::FromCData(info->GetIsolate(), nullptr);
-  info->set_setter(*object);
-}
-
-
 // Reconfigures a property to a data property with attributes, even if it is not
 // reconfigurable.
 // Requires a LookupIterator that does not look at the prototype chain beyond
 // hidden prototypes.
 MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
     LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
-    ExecutableAccessorInfoHandling handling) {
+    AccessorInfoHandling handling) {
   MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
       it, value, attributes, THROW_ON_ERROR, handling));
   return value;
@@ -5226,11 +5285,11 @@
 
 Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
     LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
-    ShouldThrow should_throw, ExecutableAccessorInfoHandling handling) {
+    ShouldThrow should_throw, AccessorInfoHandling handling) {
+  it->UpdateProtector();
   Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
   bool is_observed = object->map()->is_observed() &&
-                     (it->IsElement() ||
-                      !it->isolate()->IsInternallyUsedPropertyName(it->name()));
+                     (it->IsElement() || !it->name()->IsPrivate());
 
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
@@ -5257,7 +5316,8 @@
       // they throw. Here we should do the same.
       case LookupIterator::INTERCEPTOR:
         if (handling == DONT_FORCE_FIELD) {
-          Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+          Maybe<bool> result =
+              JSObject::SetPropertyWithInterceptor(it, should_throw, value);
           if (result.IsNothing() || result.FromJust()) return result;
         }
         break;
@@ -5265,32 +5325,26 @@
       case LookupIterator::ACCESSOR: {
         Handle<Object> accessors = it->GetAccessors();
 
-        // Special handling for ExecutableAccessorInfo, which behaves like a
-        // data property.
-        if (accessors->IsExecutableAccessorInfo() &&
-            handling == DONT_FORCE_FIELD) {
-          PropertyDetails details = it->property_details();
+        // Special handling for AccessorInfo, which behaves like a data
+        // property.
+        if (accessors->IsAccessorInfo() && handling == DONT_FORCE_FIELD) {
+          PropertyAttributes current_attributes = it->property_attributes();
           // Ensure the context isn't changed after calling into accessors.
           AssertNoContextChange ncc(it->isolate());
 
-          Maybe<bool> result =
-              JSObject::SetPropertyWithAccessor(it, value, should_throw);
-          if (result.IsNothing() || !result.FromJust()) return result;
-
-          if (details.attributes() == attributes) return Just(true);
-
-          // Reconfigure the accessor if attributes mismatch.
-          Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
-              it->isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
-          new_data->set_property_attributes(attributes);
-          // By clearing the setter we don't have to introduce a lookup to
-          // the setter, simply make it unavailable to reflect the
-          // attributes.
-          if (attributes & READ_ONLY) {
-            ExecutableAccessorInfo::ClearSetter(new_data);
+          // Update the attributes before calling the setter. The setter may
+          // later change the shape of the property.
+          if (current_attributes != attributes) {
+            it->TransitionToAccessorPair(accessors, attributes);
           }
 
-          it->TransitionToAccessorPair(new_data, attributes);
+          Maybe<bool> result =
+              JSObject::SetPropertyWithAccessor(it, value, should_throw);
+
+          if (current_attributes == attributes || result.IsNothing()) {
+            return result;
+          }
+
         } else {
           it->ReconfigureDataProperty(value, attributes);
         }
@@ -5310,10 +5364,9 @@
                                             should_throw);
 
       case LookupIterator::DATA: {
-        PropertyDetails details = it->property_details();
         Handle<Object> old_value = it->factory()->the_hole_value();
         // Regular property update if the attributes match.
-        if (details.attributes() == attributes) {
+        if (it->property_attributes() == attributes) {
           return SetDataProperty(it, value);
         }
 
@@ -5347,32 +5400,29 @@
                          CERTAINLY_NOT_STORE_FROM_KEYED);
 }
 
-
 MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
     Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
-    PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+    PropertyAttributes attributes) {
   DCHECK(!value->IsTheHole());
   LookupIterator it(object, name, LookupIterator::OWN);
-  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
-
 MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
     Handle<JSObject> object, uint32_t index, Handle<Object> value,
-    PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+    PropertyAttributes attributes) {
   Isolate* isolate = object->GetIsolate();
   LookupIterator it(isolate, object, index, LookupIterator::OWN);
-  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
-
 MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
     Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
-    PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+    PropertyAttributes attributes) {
   Isolate* isolate = object->GetIsolate();
   LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
                                                         LookupIterator::OWN);
-  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+  return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
 }
 
 
@@ -5391,7 +5441,8 @@
     return Just(ABSENT);
   }
   PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder);
+                                 *it->GetReceiver(), *holder,
+                                 Object::DONT_THROW);
   if (!interceptor->query()->IsUndefined()) {
     v8::Local<v8::Integer> result;
     if (it->IsElement()) {
@@ -5468,7 +5519,7 @@
         return Just(ABSENT);
       case LookupIterator::ACCESSOR:
       case LookupIterator::DATA:
-        return Just(it->property_details().attributes());
+        return Just(it->property_attributes());
     }
   }
   return Just(ABSENT);
@@ -5531,123 +5582,6 @@
 }
 
 
-void JSObject::MigrateFastToSlow(Handle<JSObject> object,
-                                 Handle<Map> new_map,
-                                 int expected_additional_properties) {
-  // The global object is always normalized.
-  DCHECK(!object->IsJSGlobalObject());
-  // JSGlobalProxy must never be normalized
-  DCHECK(!object->IsJSGlobalProxy());
-
-  Isolate* isolate = object->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<Map> map(object->map());
-
-  // Allocate new content.
-  int real_size = map->NumberOfOwnDescriptors();
-  int property_count = real_size;
-  if (expected_additional_properties > 0) {
-    property_count += expected_additional_properties;
-  } else {
-    property_count += 2;  // Make space for two more properties.
-  }
-  Handle<NameDictionary> dictionary =
-      NameDictionary::New(isolate, property_count);
-
-  Handle<DescriptorArray> descs(map->instance_descriptors());
-  for (int i = 0; i < real_size; i++) {
-    PropertyDetails details = descs->GetDetails(i);
-    Handle<Name> key(descs->GetKey(i));
-    switch (details.type()) {
-      case DATA_CONSTANT: {
-        Handle<Object> value(descs->GetConstant(i), isolate);
-        PropertyDetails d(details.attributes(), DATA, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-      case DATA: {
-        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-        Handle<Object> value;
-        if (object->IsUnboxedDoubleField(index)) {
-          double old_value = object->RawFastDoublePropertyAt(index);
-          value = isolate->factory()->NewHeapNumber(old_value);
-        } else {
-          value = handle(object->RawFastPropertyAt(index), isolate);
-          if (details.representation().IsDouble()) {
-            DCHECK(value->IsMutableHeapNumber());
-            Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
-            value = isolate->factory()->NewHeapNumber(old->value());
-          }
-        }
-        PropertyDetails d(details.attributes(), DATA, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-      case ACCESSOR: {
-        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-        Handle<Object> value(object->RawFastPropertyAt(index), isolate);
-        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-      case ACCESSOR_CONSTANT: {
-        Handle<Object> value(descs->GetCallbacksObject(i), isolate);
-        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-    }
-  }
-
-  // Copy the next enumeration index from instance descriptor.
-  dictionary->SetNextEnumerationIndex(real_size + 1);
-
-  // From here on we cannot fail and we shouldn't GC anymore.
-  DisallowHeapAllocation no_allocation;
-
-  // Resize the object in the heap if necessary.
-  int new_instance_size = new_map->instance_size();
-  int instance_size_delta = map->instance_size() - new_instance_size;
-  DCHECK(instance_size_delta >= 0);
-
-  if (instance_size_delta > 0) {
-    Heap* heap = isolate->heap();
-    heap->CreateFillerObjectAt(object->address() + new_instance_size,
-                               instance_size_delta);
-    heap->AdjustLiveBytes(*object, -instance_size_delta,
-                          Heap::CONCURRENT_TO_SWEEPER);
-  }
-
-  // We are storing the new map using release store after creating a filler for
-  // the left-over space to avoid races with the sweeper thread.
-  object->synchronized_set_map(*new_map);
-
-  object->set_properties(*dictionary);
-
-  // Ensure that in-object space of slow-mode object does not contain random
-  // garbage.
-  int inobject_properties = new_map->GetInObjectProperties();
-  for (int i = 0; i < inobject_properties; i++) {
-    FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
-    object->RawFastPropertyAtPut(index, Smi::FromInt(0));
-  }
-
-  isolate->counters()->props_to_dictionary()->Increment();
-
-#ifdef DEBUG
-  if (FLAG_trace_normalization) {
-    OFStream os(stdout);
-    os << "Object properties have been normalized:\n";
-    object->Print(os);
-  }
-#endif
-}
-
-
 void JSObject::MigrateSlowToFast(Handle<JSObject> object,
                                  int unused_property_fields,
                                  const char* reason) {
@@ -5717,7 +5651,7 @@
 
   // Allocate the instance descriptor.
   Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
-      isolate, instance_descriptor_length);
+      isolate, instance_descriptor_length, 0, TENURED);
 
   int number_of_allocated_fields =
       number_of_fields + unused_property_fields - inobject_props;
@@ -5885,14 +5819,18 @@
 
   DCHECK(object->HasFastSmiOrObjectElements() ||
          object->HasFastDoubleElements() ||
-         object->HasFastArgumentsElements());
+         object->HasFastArgumentsElements() ||
+         object->HasFastStringWrapperElements());
 
   Handle<SeededNumberDictionary> dictionary =
       GetNormalizedElementDictionary(object, elements);
 
   // Switch to using the dictionary as the backing storage for elements.
-  ElementsKind target_kind =
-      is_arguments ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS : DICTIONARY_ELEMENTS;
+  ElementsKind target_kind = is_arguments
+                                 ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS
+                                 : object->HasFastStringWrapperElements()
+                                       ? SLOW_STRING_WRAPPER_ELEMENTS
+                                       : DICTIONARY_ELEMENTS;
   Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
   // Set the new map first to satisfy the elements type assert in set_elements().
   JSObject::MigrateToMap(object, new_map);
@@ -5913,7 +5851,9 @@
   }
 #endif
 
-  DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
+  DCHECK(object->HasDictionaryElements() ||
+         object->HasSlowArgumentsElements() ||
+         object->HasSlowStringWrapperElements());
   return dictionary;
 }
 
@@ -6076,10 +6016,12 @@
 
 
 bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
-  Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
-  LookupIterator it(object, hidden, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Isolate* isolate = object->GetIsolate();
+  Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
+  LookupIterator it(object, hidden);
   Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
-  // Cannot get an exception since the hidden_string isn't accessible to JS.
+  // Cannot get an exception since the hidden_properties_symbol isn't exposed to
+  // JS.
   DCHECK(maybe.IsJust());
   return maybe.FromJust() != ABSENT;
 }
@@ -6095,7 +6037,8 @@
     DescriptorArray* descriptors = this->map()->instance_descriptors();
     if (descriptors->number_of_descriptors() > 0) {
       int sorted_index = descriptors->GetSortedKeyIndex(0);
-      if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
+      if (descriptors->GetKey(sorted_index) ==
+              GetHeap()->hidden_properties_symbol() &&
           sorted_index < map()->NumberOfOwnDescriptors()) {
         DCHECK(descriptors->GetType(sorted_index) == DATA);
         DCHECK(descriptors->GetDetails(sorted_index).representation().
@@ -6110,9 +6053,8 @@
       return GetHeap()->undefined_value();
     }
   } else {
-    Isolate* isolate = GetIsolate();
-    LookupIterator it(handle(this), isolate->factory()->hidden_string(),
-                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    Handle<Symbol> hidden = GetIsolate()->factory()->hidden_properties_symbol();
+    LookupIterator it(handle(this), hidden);
     // Access checks are always skipped for the hidden properties symbol anyway.
     return *GetDataProperty(&it);
   }
@@ -6141,13 +6083,14 @@
                                                       Handle<Object> value) {
   DCHECK(!object->IsJSGlobalProxy());
   Isolate* isolate = object->GetIsolate();
-  Handle<Name> name = isolate->factory()->hidden_string();
+  Handle<Symbol> name = isolate->factory()->hidden_properties_symbol();
   SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
   return object;
 }
 
 
-Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it) {
+Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
+                                                    ShouldThrow should_throw) {
   Isolate* isolate = it->isolate();
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -6160,7 +6103,7 @@
   Handle<JSObject> holder = it->GetHolder<JSObject>();
 
   PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder);
+                                 *it->GetReceiver(), *holder, should_throw);
   v8::Local<v8::Boolean> result;
   if (it->IsElement()) {
     uint32_t index = it->index();
@@ -6223,6 +6166,8 @@
 
 Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
                                        LanguageMode language_mode) {
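+  // Deletions, like stores, can break protector-cell assumptions, so give the
+  // LookupIterator a chance to invalidate them.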
+  it->UpdateProtector();
+
   Isolate* isolate = it->isolate();
 
   if (it->state() == LookupIterator::JSPROXY) {
@@ -6233,16 +6178,15 @@
   if (it->GetReceiver()->IsJSProxy()) {
     if (it->state() != LookupIterator::NOT_FOUND) {
       DCHECK_EQ(LookupIterator::DATA, it->state());
-      DCHECK(it->GetName()->IsPrivate());
+      DCHECK(it->name()->IsPrivate());
       it->Delete();
     }
     return Just(true);
   }
   Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
 
-  bool is_observed =
-      receiver->map()->is_observed() &&
-      (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()));
+  bool is_observed = receiver->map()->is_observed() &&
+                     (it->IsElement() || !it->name()->IsPrivate());
 
   Handle<Object> old_value = it->factory()->the_hole_value();
 
@@ -6258,7 +6202,10 @@
         RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
         return Just(false);
       case LookupIterator::INTERCEPTOR: {
-        Maybe<bool> result = JSObject::DeletePropertyWithInterceptor(it);
+        ShouldThrow should_throw =
+            is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+        Maybe<bool> result =
+            JSObject::DeletePropertyWithInterceptor(it, should_throw);
         // An exception was thrown in the interceptor. Propagate.
         if (isolate->has_pending_exception()) return Nothing<bool>();
         // Delete with interceptor succeeded. Return result.
@@ -6413,8 +6360,8 @@
   // 5. ReturnIfAbrupt(keys).
   Handle<FixedArray> keys;
   ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, keys,
-      JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, ALL_PROPERTIES), Object);
+      isolate, keys, JSReceiver::GetKeys(props, OWN_ONLY, ALL_PROPERTIES),
+      Object);
   // 6. Let descriptors be an empty List.
   int capacity = keys->length();
   std::vector<PropertyDescriptor> descriptors(capacity);
@@ -6599,8 +6546,8 @@
                 ? desc->value()
                 : Handle<Object>::cast(isolate->factory()->undefined_value()));
         MaybeHandle<Object> result =
-            JSObject::DefineOwnPropertyIgnoreAttributes(
-                it, value, desc->ToAttributes(), JSObject::DONT_FORCE_FIELD);
+            JSObject::DefineOwnPropertyIgnoreAttributes(it, value,
+                                                        desc->ToAttributes());
         if (result.is_null()) return Nothing<bool>();
       }
     } else {
@@ -6792,8 +6739,8 @@
                                   ? current->value()
                                   : Handle<Object>::cast(
                                         isolate->factory()->undefined_value()));
-      MaybeHandle<Object> result = JSObject::DefineOwnPropertyIgnoreAttributes(
-          it, value, attrs, JSObject::DONT_FORCE_FIELD);
+      MaybeHandle<Object> result =
+          JSObject::DefineOwnPropertyIgnoreAttributes(it, value, attrs);
       if (result.is_null()) return Nothing<bool>();
     } else {
       DCHECK(desc_is_accessor_descriptor ||
@@ -6857,10 +6804,9 @@
       return Just(false);
   }
 
-  RETURN_ON_EXCEPTION_VALUE(
-      it->isolate(),
-      DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
-      Nothing<bool>());
+  RETURN_ON_EXCEPTION_VALUE(it->isolate(),
+                            DefineOwnPropertyIgnoreAttributes(it, value, NONE),
+                            Nothing<bool>());
 
   return Just(true);
 }
@@ -7081,7 +7027,7 @@
                                        ShouldThrow should_throw) {
   STACK_CHECK(Nothing<bool>());
   if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
-    return AddPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
+    return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
                               should_throw);
   }
   Handle<String> trap_name = isolate->factory()->defineProperty_string();
@@ -7187,7 +7133,7 @@
 
 
 // static
-Maybe<bool> JSProxy::AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
                                         Handle<Symbol> private_name,
                                         PropertyDescriptor* desc,
                                         ShouldThrow should_throw) {
@@ -7207,7 +7153,7 @@
 
   if (it.IsFound()) {
     DCHECK_EQ(LookupIterator::DATA, it.state());
-    DCHECK_EQ(DONT_ENUM, it.property_details().attributes());
+    DCHECK_EQ(DONT_ENUM, it.property_attributes());
     it.WriteDataValue(value);
     return Just(true);
   }
@@ -7277,9 +7223,9 @@
     Handle<AccessorPair> accessors =
         Handle<AccessorPair>::cast(it->GetAccessors());
     // 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
-    desc->set_get(handle(accessors->GetComponent(ACCESSOR_GETTER), isolate));
+    desc->set_get(AccessorPair::GetComponent(accessors, ACCESSOR_GETTER));
     // 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
-    desc->set_set(handle(accessors->GetComponent(ACCESSOR_SETTER), isolate));
+    desc->set_set(AccessorPair::GetComponent(accessors, ACCESSOR_SETTER));
   }
 
   // 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
@@ -7412,9 +7358,7 @@
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  DCHECK(IsFastObjectElementsKind(kind) ||
-         kind == DICTIONARY_ELEMENTS);
-  if (IsFastObjectElementsKind(kind)) {
+  if (IsFastObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
         : elements->length();
@@ -7423,6 +7367,7 @@
       if (!element->IsTheHole() && element == object) return true;
     }
   } else {
+    DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
     Object* key =
         SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
     if (!key->IsUndefined()) return true;
@@ -7473,7 +7418,9 @@
       break;
     case FAST_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
-    case DICTIONARY_ELEMENTS: {
+    case DICTIONARY_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(this->elements());
       if (ReferencesObjectFromElements(elements, kind, obj)) return true;
       break;
@@ -7494,6 +7441,8 @@
       if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
       break;
     }
+    case NO_ELEMENTS:
+      break;
   }
 
   // For functions check the context.
@@ -7875,7 +7824,8 @@
 
   Handle<SeededNumberDictionary> new_element_dictionary;
   if (!object->HasFixedTypedArrayElements() &&
-      !object->HasDictionaryElements()) {
+      !object->HasDictionaryElements() &&
+      !object->HasSlowStringWrapperElements()) {
     int length =
         object->IsJSArray()
             ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
@@ -7902,7 +7852,8 @@
   if (transition != NULL) {
     Handle<Map> transition_map(transition, isolate);
     DCHECK(transition_map->has_dictionary_elements() ||
-           transition_map->has_fixed_typed_array_elements());
+           transition_map->has_fixed_typed_array_elements() ||
+           transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
     DCHECK(!transition_map->is_extensible());
     JSObject::MigrateToMap(object, transition_map);
   } else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
@@ -7922,7 +7873,11 @@
         Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
     new_map->set_is_extensible(false);
     if (!new_element_dictionary.is_null()) {
-      new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+      ElementsKind new_kind =
+          IsStringWrapperElementsKind(old_map->elements_kind())
+              ? SLOW_STRING_WRAPPER_ELEMENTS
+              : DICTIONARY_ELEMENTS;
+      new_map->set_elements_kind(new_kind);
     }
     JSObject::MigrateToMap(object, new_map);
 
@@ -7947,7 +7902,8 @@
     return Just(true);
   }
 
-  DCHECK(object->map()->has_dictionary_elements());
+  DCHECK(object->map()->has_dictionary_elements() ||
+         object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
   if (!new_element_dictionary.is_null()) {
     object->set_elements(*new_element_dictionary);
   }
@@ -7999,8 +7955,9 @@
   return Object::WrapForRead(isolate, raw_value, representation);
 }
 
+enum class BoilerplateKind { kNormalBoilerplate, kApiBoilerplate };
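+// kNormalBoilerplate: object-literal boilerplates, which never contain
+// JSFunctions. kApiBoilerplate: API-created boilerplates, whose cached Api
+// functions are shared rather than deep-copied (see StructureWalk).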
 
-template<class ContextObject>
+template <class ContextObject, BoilerplateKind boilerplate_kind>
 class JSObjectWalkVisitor {
  public:
   JSObjectWalkVisitor(ContextObject* site_context, bool copying,
@@ -8032,10 +7989,9 @@
   const JSObject::DeepCopyHints hints_;
 };
 
-
-template <class ContextObject>
-MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
-    Handle<JSObject> object) {
+template <class ContextObject, BoilerplateKind boilerplate_kind>
+MaybeHandle<JSObject> JSObjectWalkVisitor<
+    ContextObject, boilerplate_kind>::StructureWalk(Handle<JSObject> object) {
   Isolate* isolate = this->isolate();
   bool copying = this->copying();
   bool shallow = hints_ == JSObject::kObjectIsShallow;
@@ -8055,6 +8011,26 @@
 
   Handle<JSObject> copy;
   if (copying) {
+    if (boilerplate_kind == BoilerplateKind::kApiBoilerplate) {
+      if (object->IsJSFunction()) {
+#ifdef DEBUG
+        // Ensure that it is an Api function and that template_instantiations_cache
+        // contains an entry for the function's FunctionTemplateInfo.
+        JSFunction* function = JSFunction::cast(*object);
+        CHECK(function->shared()->IsApiFunction());
+        FunctionTemplateInfo* data = function->shared()->get_api_func_data();
+        auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
+        CHECK(serial_number->value());
+        auto cache = isolate->template_instantiations_cache();
+        Object* element = cache->Lookup(serial_number);
+        CHECK_EQ(function, element);
+#endif
+        return object;
+      }
+    } else {
+      // JSFunction objects are not allowed to be in normal boilerplates at all.
+      DCHECK(!object->IsJSFunction());
+    }
     Handle<AllocationSite> site_to_pass;
     if (site_context()->ShouldCreateMemento(object)) {
       site_to_pass = site_context()->current();
@@ -8115,7 +8091,7 @@
       // an array.
       PropertyFilter filter = static_cast<PropertyFilter>(
           ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
-      KeyAccumulator accumulator(isolate, filter);
+      KeyAccumulator accumulator(isolate, OWN_ONLY, filter);
       accumulator.NextPrototype();
       copy->CollectOwnPropertyNames(&accumulator, filter);
       Handle<FixedArray> names = accumulator.GetKeys();
@@ -8139,12 +8115,8 @@
     }
 
     // Deep copy own elements.
-    // Pixel elements cannot be created using an object literal.
-    DCHECK(!copy->HasFixedTypedArrayElements());
     switch (kind) {
-      case FAST_SMI_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_HOLEY_ELEMENTS: {
         Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
         if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
@@ -8156,9 +8128,6 @@
         } else {
           for (int i = 0; i < elements->length(); i++) {
             Handle<Object> value(elements->get(i), isolate);
-            DCHECK(value->IsSmi() ||
-                   value->IsTheHole() ||
-                   (IsFastObjectElementsKind(copy->GetElementsKind())));
             if (value->IsJSObject()) {
               Handle<JSObject> result;
               ASSIGN_RETURN_ON_EXCEPTION(
@@ -8199,16 +8168,25 @@
       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
         UNIMPLEMENTED();
         break;
-
+      case FAST_STRING_WRAPPER_ELEMENTS:
+      case SLOW_STRING_WRAPPER_ELEMENTS:
+        UNREACHABLE();
+        break;
 
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                        \
       case TYPE##_ELEMENTS:                                                    \
 
       TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
+      // Typed elements cannot be created using an object literal.
+      UNREACHABLE();
+      break;
 
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case NO_ELEMENTS:
         // No contained objects, nothing to do.
         break;
     }
@@ -8221,8 +8199,9 @@
 MaybeHandle<JSObject> JSObject::DeepWalk(
     Handle<JSObject> object,
     AllocationSiteCreationContext* site_context) {
-  JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
-                                                       kNoHints);
+  JSObjectWalkVisitor<AllocationSiteCreationContext,
+                      BoilerplateKind::kNormalBoilerplate> v(site_context,
+                                                             false, kNoHints);
   MaybeHandle<JSObject> result = v.StructureWalk(object);
   Handle<JSObject> for_assert;
   DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
@@ -8234,13 +8213,35 @@
     Handle<JSObject> object,
     AllocationSiteUsageContext* site_context,
     DeepCopyHints hints) {
-  JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
+  JSObjectWalkVisitor<AllocationSiteUsageContext,
+                      BoilerplateKind::kNormalBoilerplate> v(site_context, true,
+                                                             hints);
   MaybeHandle<JSObject> copy = v.StructureWalk(object);
   Handle<JSObject> for_assert;
   DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
   return copy;
 }
 
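+// An AllocationSiteContext that never creates mementos: deep copies of Api
+// boilerplates do not track allocation sites.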
+class DummyContextObject : public AllocationSiteContext {
+ public:
+  explicit DummyContextObject(Isolate* isolate)
+      : AllocationSiteContext(isolate) {}
+
+  bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
+  Handle<AllocationSite> EnterNewScope() { return Handle<AllocationSite>(); }
+  void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object) {}
+};
+
+MaybeHandle<JSObject> JSObject::DeepCopyApiBoilerplate(
+    Handle<JSObject> object) {
+  DummyContextObject dummy_context_object(object->GetIsolate());
+  JSObjectWalkVisitor<DummyContextObject, BoilerplateKind::kApiBoilerplate> v(
+      &dummy_context_object, true, kNoHints);
+  MaybeHandle<JSObject> copy = v.StructureWalk(object);
+  Handle<JSObject> for_assert;
+  DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
+  return copy;
+}
 
 // static
 MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
@@ -8314,12 +8315,6 @@
 
 // TODO(cbruni/jkummerow): Consider moving this into elements.cc.
 bool HasEnumerableElements(JSObject* object) {
-  if (object->IsJSValue()) {
-    Object* value = JSValue::cast(object)->value();
-    if (value->IsString()) {
-      if (String::cast(value)->length() > 0) return true;
-    }
-  }
   switch (object->GetElementsKind()) {
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
@@ -8371,6 +8366,14 @@
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
       // We're approximating non-empty arguments objects here.
       return true;
+    case FAST_STRING_WRAPPER_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
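+      // String wrappers expose the wrapped string's characters as elements,
+      // in addition to anything in the elements backing store.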
+      if (String::cast(JSValue::cast(object)->value())->length() > 0) {
+        return true;
+      }
+      return object->elements()->length() > 0;
+    case NO_ELEMENTS:
+      return false;
   }
   UNREACHABLE();
   return true;
@@ -8444,22 +8447,27 @@
 
 static Handle<FixedArray> ReduceFixedArrayTo(
     Handle<FixedArray> array, int length) {
-  DCHECK(array->length() >= length);
+  DCHECK_LE(length, array->length());
   if (array->length() == length) return array;
-
-  Handle<FixedArray> new_array =
-      array->GetIsolate()->factory()->NewFixedArray(length);
-  for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
-  return new_array;
+  return array->GetIsolate()->factory()->CopyFixedArrayUpTo(array, length);
 }
 
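+// A map has only "simple" properties when enumerating them needs no access
+// checks, interceptors, hidden prototypes, dictionary lookups, or string
+// wrapper elements; only such maps may cache their enum length.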
+bool Map::OnlyHasSimpleProperties() {
+  // Wrapped string elements aren't explicitly stored in the elements backing
+  // store, but are loaded indirectly from the underlying string.
+  return !IsStringWrapperElementsKind(elements_kind()) &&
+         !is_access_check_needed() && !has_named_interceptor() &&
+         !has_indexed_interceptor() && !has_hidden_prototype() &&
+         !is_dictionary_map();
+}
 
 namespace {
 
 Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
-                                           Handle<JSObject> object,
-                                           bool cache_enum_length) {
+                                           Handle<JSObject> object) {
   Handle<Map> map(object->map());
+  bool cache_enum_length = map->OnlyHasSimpleProperties();
+
   Handle<DescriptorArray> descs =
       Handle<DescriptorArray>(map->instance_descriptors(), isolate);
   int own_property_count = map->EnumLength();
@@ -8532,12 +8540,10 @@
 
 }  // namespace
 
-
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
-                                                 bool cache_enum_length) {
+Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
   Isolate* isolate = object->GetIsolate();
   if (object->HasFastProperties()) {
-    return GetFastEnumPropertyKeys(isolate, object, cache_enum_length);
+    return GetFastEnumPropertyKeys(isolate, object);
   } else if (object->IsJSGlobalObject()) {
     Handle<GlobalDictionary> dictionary(object->global_dictionary());
     int length = dictionary->NumberOfEnumElements();
@@ -8583,7 +8589,7 @@
     return Just(true);
   }
   PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
-                                 *object);
+                                 *object, Object::DONT_THROW);
   v8::Local<v8::Object> result;
   if (!interceptor->enumerator()->IsUndefined()) {
     Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
@@ -8603,8 +8609,8 @@
     accumulator->AddElementKeysFromInterceptor(
         Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
   } else {
-    accumulator->AddKeys(
-        Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+    accumulator->AddKeys(Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)),
+                         DO_NOT_CONVERT);
   }
   return Just(true);
 }
@@ -8616,7 +8622,7 @@
                                        Handle<JSReceiver> receiver,
                                        Handle<JSObject> object,
                                        PropertyFilter* filter,
-                                       JSReceiver::KeyCollectionType type,
+                                       KeyCollectionType type,
                                        KeyAccumulator* accumulator) {
   accumulator->NextPrototype();
   // Check access rights if required.
@@ -8624,11 +8630,11 @@
       !isolate->MayAccess(handle(isolate->context()), object)) {
     // The cross-origin spec says that [[Enumerate]] shall return an empty
     // iterator when it doesn't have access...
-    if (type == JSReceiver::INCLUDE_PROTOS) {
+    if (type == INCLUDE_PROTOS) {
       return Just(false);
     }
     // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
-    DCHECK(type == JSReceiver::OWN_ONLY);
+    DCHECK_EQ(OWN_ONLY, type);
     *filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
   }
 
@@ -8641,33 +8647,8 @@
   MAYBE_RETURN(success, Nothing<bool>());
 
   if (*filter == ENUMERABLE_STRINGS) {
-    // We can cache the computed property keys if access checks are
-    // not needed and no interceptors are involved.
-    //
-    // We do not use the cache if the object has elements and
-    // therefore it does not make sense to cache the property names
-    // for arguments objects.  Arguments objects will always have
-    // elements.
-    // Wrapped strings have elements, but don't have an elements
-    // array or dictionary.  So the fast inline test for whether to
-    // use the cache says yes, so we should not create a cache.
-    Handle<JSFunction> arguments_function(
-        JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
-    bool has_hidden_prototype = false;
-    Object* prototype = object->map()->prototype();
-    if (prototype->IsJSObject()) {
-      has_hidden_prototype =
-          JSObject::cast(prototype)->map()->is_hidden_prototype();
-    }
-    bool cache_enum_length =
-        ((object->map()->GetConstructor() != *arguments_function) &&
-         !object->IsJSValue() && !object->IsAccessCheckNeeded() &&
-         !object->HasNamedInterceptor() && !object->HasIndexedInterceptor() &&
-         !has_hidden_prototype);
-    // Compute the property keys and cache them if possible.
-    Handle<FixedArray> enum_keys =
-        JSObject::GetEnumPropertyKeys(object, cache_enum_length);
-    accumulator->AddKeys(enum_keys);
+    Handle<FixedArray> enum_keys = JSObject::GetEnumPropertyKeys(object);
+    accumulator->AddKeys(enum_keys, DO_NOT_CONVERT);
   } else {
     object->CollectOwnPropertyNames(accumulator, *filter);
   }
@@ -8686,28 +8667,22 @@
 static Maybe<bool> GetKeys_Internal(Isolate* isolate,
                                     Handle<JSReceiver> receiver,
                                     Handle<JSReceiver> object,
-                                    JSReceiver::KeyCollectionType type,
+                                    KeyCollectionType type,
                                     PropertyFilter filter,
                                     KeyAccumulator* accumulator) {
-  PrototypeIterator::WhereToEnd end = type == JSReceiver::OWN_ONLY
+  PrototypeIterator::WhereToEnd end = type == OWN_ONLY
                                           ? PrototypeIterator::END_AT_NON_HIDDEN
                                           : PrototypeIterator::END_AT_NULL;
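+  // OWN_ONLY still walks hidden prototypes; their keys are reported as if
+  // they were own keys of the receiver.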
   for (PrototypeIterator iter(isolate, object,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(end); iter.Advance()) {
+                              PrototypeIterator::START_AT_RECEIVER, end);
+       !iter.IsAtEnd(); iter.Advance()) {
     Handle<JSReceiver> current =
         PrototypeIterator::GetCurrent<JSReceiver>(iter);
     Maybe<bool> result = Just(false);  // Dummy initialization.
     if (current->IsJSProxy()) {
-      if (type == JSReceiver::OWN_ONLY) {
-        result = JSProxy::OwnPropertyKeys(isolate, receiver,
-                                          Handle<JSProxy>::cast(current),
-                                          filter, accumulator);
-      } else {
-        DCHECK(type == JSReceiver::INCLUDE_PROTOS);
-        result = JSProxy::Enumerate(
-            isolate, receiver, Handle<JSProxy>::cast(current), accumulator);
-      }
+      result = JSProxy::OwnPropertyKeys(isolate, receiver,
+                                        Handle<JSProxy>::cast(current), filter,
+                                        accumulator);
     } else {
       DCHECK(current->IsJSObject());
       result = GetKeysFromJSObject(isolate, receiver,
@@ -8721,54 +8696,6 @@
 }
 
 
-// ES6 9.5.11
-// Returns false in case of exception.
-// static
-Maybe<bool> JSProxy::Enumerate(Isolate* isolate, Handle<JSReceiver> receiver,
-                               Handle<JSProxy> proxy,
-                               KeyAccumulator* accumulator) {
-  STACK_CHECK(Nothing<bool>());
-  // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
-  Handle<Object> handler(proxy->handler(), isolate);
-  // 2. If handler is null, throw a TypeError exception.
-  // 3. Assert: Type(handler) is Object.
-  if (proxy->IsRevoked()) {
-    isolate->Throw(*isolate->factory()->NewTypeError(
-        MessageTemplate::kProxyRevoked,
-        isolate->factory()->enumerate_string()));
-    return Nothing<bool>();
-  }
-  // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
-  Handle<JSReceiver> target(proxy->target(), isolate);
-  // 5. Let trap be ? GetMethod(handler, "enumerate").
-  Handle<Object> trap;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
-                                       isolate->factory()->enumerate_string()),
-      Nothing<bool>());
-  // 6. If trap is undefined, then
-  if (trap->IsUndefined()) {
-    // 6a. Return target.[[Enumerate]]().
-    return GetKeys_Internal(isolate, receiver, target, INCLUDE_PROTOS,
-                            ENUMERABLE_STRINGS, accumulator);
-  }
-  // The "proxy_enumerate" helper calls the trap (steps 7 - 9), which returns
-  // a generator; it then iterates over that generator until it's exhausted
-  // and returns an array containing the generated values.
-  Handle<Object> trap_result_array;
-  Handle<Object> args[] = {trap, handler, target};
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, trap_result_array,
-      Execution::Call(isolate, isolate->proxy_enumerate(),
-                      isolate->factory()->undefined_value(), arraysize(args),
-                      args),
-      Nothing<bool>());
-  accumulator->NextPrototype();
-  accumulator->AddKeysFromProxy(Handle<JSObject>::cast(trap_result_array));
-  return Just(true);
-}
-
-
 // ES6 9.5.12
 // Returns |true| on success, |nothing| in case of exception.
 // static
@@ -8866,10 +8793,15 @@
   const int kPresent = 1;
   const int kGone = 0;
   IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
-  int unchecked_result_keys_size = trap_result->length();
+  int unchecked_result_keys_size = 0;
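+  // Count each key in the trap result only once, even if the trap returned
+  // duplicates.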
   for (int i = 0; i < trap_result->length(); ++i) {
     DCHECK(trap_result->get(i)->IsUniqueName());
-    unchecked_result_keys.Set(trap_result->get(i), kPresent);
+    Object* key = trap_result->get(i);
+    int* entry = unchecked_result_keys.Get(key);
+    if (*entry != kPresent) {
+      *entry = kPresent;
+      unchecked_result_keys_size++;
+    }
   }
   // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
   for (int i = 0; i < nonconfigurable_keys_length; ++i) {
@@ -8924,7 +8856,7 @@
                                             GetKeysConversion keys_conversion) {
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
-  KeyAccumulator accumulator(isolate, filter);
+  KeyAccumulator accumulator(isolate, type, filter);
   MAYBE_RETURN(
       GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
       MaybeHandle<FixedArray>());
@@ -8933,6 +8865,64 @@
   return keys;
 }
 
+MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
+                                              Handle<JSReceiver> object,
+                                              PropertyFilter filter,
+                                              bool get_entries) {
+  PropertyFilter key_filter =
+      static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
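+  // ONLY_ENUMERABLE was stripped from the key filter above; enumerability
+  // is instead re-checked for each key below via GetOwnPropertyDescriptor.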
+  KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
+  MAYBE_RETURN(GetKeys_Internal(isolate, object, object, OWN_ONLY, key_filter,
+                                &accumulator),
+               MaybeHandle<FixedArray>());
+  Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
+  DCHECK(ContainsOnlyValidKeys(keys));
+
+  Handle<FixedArray> values_or_entries =
+      isolate->factory()->NewFixedArray(keys->length());
+  int length = 0;
+
+  for (int i = 0; i < keys->length(); ++i) {
+    Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
+
+    if (filter & ONLY_ENUMERABLE) {
+      PropertyDescriptor descriptor;
+      Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+          isolate, object, key, &descriptor);
+      MAYBE_RETURN(did_get_descriptor, MaybeHandle<FixedArray>());
+      if (!did_get_descriptor.FromJust() || !descriptor.enumerable()) continue;
+    }
+
+    Handle<Object> value;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate, value, JSReceiver::GetPropertyOrElement(object, key),
+        MaybeHandle<FixedArray>());
+
+    if (get_entries) {
+      Handle<FixedArray> entry_storage =
+          isolate->factory()->NewUninitializedFixedArray(2);
+      entry_storage->set(0, *key);
+      entry_storage->set(1, *value);
+      value = isolate->factory()->NewJSArrayWithElements(entry_storage,
+                                                         FAST_ELEMENTS, 2);
+    }
+
+    values_or_entries->set(length, *value);
+    length++;
+  }
+  if (length < values_or_entries->length()) values_or_entries->Shrink(length);
+  return values_or_entries;
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
+                                                 PropertyFilter filter) {
+  return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
+                                                  PropertyFilter filter) {
+  return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+}
 
 bool Map::DictionaryElementsInPrototypeChainOnly() {
   if (IsDictionaryElementsKind(elements_kind())) {
@@ -9000,7 +8990,7 @@
 
   Handle<Object> old_value = isolate->factory()->the_hole_value();
   bool is_observed = object->map()->is_observed() &&
-                     !isolate->IsInternallyUsedPropertyName(it->GetName());
+                     (it->IsElement() || !it->name()->IsPrivate());
   bool preexists = false;
   if (is_observed) {
     CHECK(GetPropertyAttributes(it).IsJust());
@@ -9011,8 +9001,10 @@
     }
   }
 
-  DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull());
-  DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull());
+  DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull() ||
+         getter->IsFunctionTemplateInfo());
+  DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull() ||
+         setter->IsFunctionTemplateInfo());
   // At least one of the accessors needs to be a new value.
   DCHECK(!getter->IsNull() || !setter->IsNull());
   if (!getter->IsNull()) {
@@ -9111,9 +9103,8 @@
       case LookupIterator::ACCESSOR: {
         Handle<Object> maybe_pair = it.GetAccessors();
         if (maybe_pair->IsAccessorPair()) {
-          return handle(
-              AccessorPair::cast(*maybe_pair)->GetComponent(component),
-              isolate);
+          return AccessorPair::GetComponent(
+              Handle<AccessorPair>::cast(maybe_pair), component);
         }
       }
     }
@@ -9182,8 +9173,6 @@
   if (!map->is_dictionary_map()) {
     new_bit_field3 = IsUnstable::update(new_bit_field3, false);
   }
-  new_bit_field3 =
-      ConstructionCounter::update(new_bit_field3, kNoSlackTracking);
   result->set_bit_field3(new_bit_field3);
   return result;
 }
@@ -9240,7 +9229,7 @@
     new_map = Map::CopyNormalized(fast_map, mode);
     if (use_cache) {
       cache->Set(fast_map, new_map);
-      isolate->counters()->normalized_maps()->Increment();
+      isolate->counters()->maps_normalized()->Increment();
     }
 #if TRACE_MAPS
     if (FLAG_trace_maps) {
@@ -9270,6 +9259,7 @@
 
   result->set_dictionary_map(true);
   result->set_migration_target(false);
+  result->set_construction_counter(kNoSlackTracking);
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) result->DictionaryMapVerify();
@@ -9453,7 +9443,7 @@
       for (int i = 0; i < length; i++) {
         descriptors->SetRepresentation(i, Representation::Tagged());
         if (descriptors->GetDetails(i).type() == DATA) {
-          descriptors->SetValue(i, HeapType::Any());
+          descriptors->SetValue(i, FieldType::Any());
         }
       }
       result->InitializeDescriptors(*descriptors,
@@ -9760,23 +9750,37 @@
       transition_marker, reason, SPECIAL_TRANSITION);
   new_map->set_is_extensible(false);
   if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
-    new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+    ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
+                                ? SLOW_STRING_WRAPPER_ELEMENTS
+                                : DICTIONARY_ELEMENTS;
+    new_map->set_elements_kind(new_kind);
   }
   return new_map;
 }
 
+FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
+  DCHECK(GetDetails(descriptor_number).location() == kField);
+  Object* value = GetValue(descriptor_number);
+  if (value->IsWeakCell()) {
+    if (WeakCell::cast(value)->cleared()) return FieldType::None();
+    value = WeakCell::cast(value)->value();
+  }
+  return FieldType::cast(value);
+}
 
-bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
-  PropertyDetails details = GetDetails(descriptor);
+namespace {
+
+bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
+  PropertyDetails details = descriptors->GetDetails(descriptor);
   switch (details.type()) {
     case DATA:
       return value->FitsRepresentation(details.representation()) &&
-             GetFieldType(descriptor)->NowContains(value);
+             descriptors->GetFieldType(descriptor)->NowContains(value);
 
     case DATA_CONSTANT:
-      DCHECK(GetConstant(descriptor) != value ||
+      DCHECK(descriptors->GetConstant(descriptor) != value ||
              value->FitsRepresentation(details.representation()));
-      return GetConstant(descriptor) == value;
+      return descriptors->GetConstant(descriptor) == value;
 
     case ACCESSOR:
     case ACCESSOR_CONSTANT:
@@ -9787,28 +9791,29 @@
   return false;
 }
 
+Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
+                                     Handle<Object> value) {
+  if (CanHoldValue(map->instance_descriptors(), descriptor, *value)) return map;
+
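+  // The descriptor cannot hold |value| as-is; generalize it to a field with
+  // the value's optimal representation and field type.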
+  Isolate* isolate = map->GetIsolate();
+  PropertyAttributes attributes =
+      map->instance_descriptors()->GetDetails(descriptor).attributes();
+  Representation representation = value->OptimalRepresentation();
+  Handle<FieldType> type = value->OptimalType(isolate, representation);
+
+  return Map::ReconfigureProperty(map, descriptor, kData, attributes,
+                                  representation, type, FORCE_FIELD);
+}
+
+}  // namespace
 
 // static
 Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
                                         Handle<Object> value) {
   // Dictionaries can store any property value.
-  if (map->is_dictionary_map()) return map;
-
-  // Migrate to the newest map before storing the property.
-  map = Update(map);
-
-  Handle<DescriptorArray> descriptors(map->instance_descriptors());
-
-  if (descriptors->CanHoldValue(descriptor, *value)) return map;
-
-  Isolate* isolate = map->GetIsolate();
-  PropertyAttributes attributes =
-      descriptors->GetDetails(descriptor).attributes();
-  Representation representation = value->OptimalRepresentation();
-  Handle<HeapType> type = value->OptimalType(isolate, representation);
-
-  return ReconfigureProperty(map, descriptor, kData, attributes, representation,
-                             type, FORCE_FIELD);
+  DCHECK(!map->is_dictionary_map());
+  // Update to the newest map before storing the property.
+  return UpdateDescriptorForValue(Update(map), descriptor, value);
 }
 
 
@@ -9816,8 +9821,8 @@
                                           Handle<Object> value,
                                           PropertyAttributes attributes,
                                           StoreFromKeyed store_mode) {
-  // Dictionary maps can always have additional data properties.
-  if (map->is_dictionary_map()) return map;
+  DCHECK(name->IsUniqueName());
+  DCHECK(!map->is_dictionary_map());
 
   // Migrate to the newest map before storing the property.
   map = Update(map);
@@ -9832,7 +9837,7 @@
                               ->GetDetails(descriptor)
                               .attributes());
 
-    return Map::PrepareForDataProperty(transition, descriptor, value);
+    return UpdateDescriptorForValue(transition, descriptor, value);
   }
 
   TransitionFlag flag = INSERT_TRANSITION;
@@ -9842,7 +9847,7 @@
   } else if (!map->TooManyFastProperties(store_mode)) {
     Isolate* isolate = name->GetIsolate();
     Representation representation = value->OptimalRepresentation();
-    Handle<HeapType> type = value->OptimalType(isolate, representation);
+    Handle<FieldType> type = value->OptimalType(isolate, representation);
     maybe_map =
         Map::CopyWithField(map, name, type, attributes, representation, flag);
   }
@@ -9887,7 +9892,7 @@
   Isolate* isolate = map->GetIsolate();
   Handle<Map> new_map = ReconfigureProperty(
       map, descriptor, kind, attributes, Representation::None(),
-      HeapType::None(isolate), FORCE_FIELD);
+      FieldType::None(isolate), FORCE_FIELD);
   return new_map;
 }
 
@@ -9897,6 +9902,7 @@
                                               AccessorComponent component,
                                               Handle<Object> accessor,
                                               PropertyAttributes attributes) {
+  DCHECK(name->IsUniqueName());
   Isolate* isolate = name->GetIsolate();
 
   // Dictionary maps can always have additional data properties.
@@ -9935,7 +9941,7 @@
 
   Handle<AccessorPair> pair;
   DescriptorArray* old_descriptors = map->instance_descriptors();
-  int descriptor = old_descriptors->SearchWithCache(*name, *map);
+  int descriptor = old_descriptors->SearchWithCache(isolate, *name, *map);
   if (descriptor != DescriptorArray::kNotFound) {
     if (descriptor != map->LastAdded()) {
       return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
@@ -9981,9 +9987,6 @@
                                    TransitionFlag flag) {
   Handle<DescriptorArray> descriptors(map->instance_descriptors());
 
-  // Ensure the key is unique.
-  descriptor->KeyToUniqueName();
-
   // Share descriptors only if map owns descriptors and is not an initial map.
   if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
       !map->GetBackPointer()->IsUndefined() &&
@@ -10012,11 +10015,9 @@
                                       TransitionFlag flag) {
   Handle<DescriptorArray> old_descriptors(map->instance_descriptors());
 
-  // Ensure the key is unique.
-  descriptor->KeyToUniqueName();
-
   // We replace the key if it is already present.
-  int index = old_descriptors->SearchWithCache(*descriptor->GetKey(), *map);
+  int index = old_descriptors->SearchWithCache(map->GetIsolate(),
+                                               *descriptor->GetKey(), *map);
   if (index != DescriptorArray::kNotFound) {
     return CopyReplaceDescriptor(map, old_descriptors, descriptor, index, flag);
   }
@@ -10099,9 +10100,6 @@
                                        Descriptor* descriptor,
                                        int insertion_index,
                                        TransitionFlag flag) {
-  // Ensure the key is unique.
-  descriptor->KeyToUniqueName();
-
   Handle<Name> key = descriptor->GetKey();
   DCHECK(*key == descriptors->GetKey(insertion_index));
 
@@ -10796,17 +10794,18 @@
   return array;
 }
 
-
 Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
                                                   int number_of_descriptors,
-                                                  int slack) {
+                                                  int slack,
+                                                  PretenureFlag pretenure) {
   DCHECK(0 <= number_of_descriptors);
   Factory* factory = isolate->factory();
   // Do not use DescriptorArray::cast on incomplete object.
   int size = number_of_descriptors + slack;
   if (size == 0) return factory->empty_descriptor_array();
   // Allocate the array of keys.
-  Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size), TENURED);
+  Handle<FixedArray> result =
+      factory->NewFixedArray(LengthFor(size), pretenure);
 
   result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
   result->set(kEnumCacheIndex, Smi::FromInt(0));
@@ -10921,13 +10920,21 @@
   return copy;
 }
 
-
-Object* AccessorPair::GetComponent(AccessorComponent component) {
-  Object* accessor = get(component);
-  return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+Handle<Object> AccessorPair::GetComponent(Handle<AccessorPair> accessor_pair,
+                                          AccessorComponent component) {
+  Object* accessor = accessor_pair->get(component);
+  if (accessor->IsFunctionTemplateInfo()) {
+    return ApiNatives::InstantiateFunction(
+               handle(FunctionTemplateInfo::cast(accessor)))
+        .ToHandleChecked();
+  }
+  Isolate* isolate = accessor_pair->GetIsolate();
+  if (accessor->IsTheHole()) {
+    return isolate->factory()->undefined_value();
+  }
+  return handle(accessor, isolate);
 }
 
-
 Handle<DeoptimizationInputData> DeoptimizationInputData::New(
     Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
   return Handle<DeoptimizationInputData>::cast(
@@ -10963,23 +10970,31 @@
   return casted_literals;
 }
 
-
-int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
+int HandlerTable::LookupRange(int pc_offset, int* data_out,
                               CatchPrediction* prediction_out) {
-  int innermost_handler = -1, innermost_start = -1;
+  int innermost_handler = -1;
+#ifdef DEBUG
+  // Assuming that ranges are well nested, we don't need to track the innermost
+  // offsets. This is just to verify that the table is actually well nested.
+  int innermost_start = std::numeric_limits<int>::min();
+  int innermost_end = std::numeric_limits<int>::max();
+#endif
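+  // Entries are scanned in order; since ranges are well nested, each later
+  // match is nested inside the previous one, so the last match wins.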
   for (int i = 0; i < length(); i += kRangeEntrySize) {
     int start_offset = Smi::cast(get(i + kRangeStartIndex))->value();
     int end_offset = Smi::cast(get(i + kRangeEndIndex))->value();
     int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
     int handler_offset = HandlerOffsetField::decode(handler_field);
     CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
-    int stack_depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+    int handler_data = Smi::cast(get(i + kRangeDataIndex))->value();
     if (pc_offset > start_offset && pc_offset <= end_offset) {
-      DCHECK_NE(start_offset, innermost_start);
-      if (start_offset < innermost_start) continue;
+      DCHECK_GE(start_offset, innermost_start);
+      DCHECK_LT(end_offset, innermost_end);
       innermost_handler = handler_offset;
+#ifdef DEBUG
       innermost_start = start_offset;
-      *stack_depth_out = stack_depth;
+      innermost_end = end_offset;
+#endif
+      if (data_out) *data_out = handler_data;
       if (prediction_out) *prediction_out = prediction;
     }
   }
@@ -12257,7 +12272,8 @@
          first->bit_field() == second->bit_field() &&
          first->is_extensible() == second->is_extensible() &&
          first->is_strong() == second->is_strong() &&
-         first->is_hidden_prototype() == second->is_hidden_prototype();
+         first->new_target_is_base() == second->new_target_is_base() &&
+         first->has_hidden_prototype() == second->has_hidden_prototype();
 }
 
 }  // namespace
@@ -12564,23 +12580,27 @@
   map->SetInObjectProperties(map->GetInObjectProperties() - slack);
   map->set_unused_property_fields(map->unused_property_fields() - slack);
   map->set_instance_size(map->instance_size() - slack * kPointerSize);
+  map->set_construction_counter(Map::kNoSlackTracking);
 
   // Visitor id might depend on the instance size, recalculate it.
   map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
 }
 
+static void StopSlackTracking(Map* map, void* data) {
+  map->set_construction_counter(Map::kNoSlackTracking);
+}
 
 void Map::CompleteInobjectSlackTracking() {
   // Has to be an initial map.
   DCHECK(GetBackPointer()->IsUndefined());
 
-  set_construction_counter(kNoSlackTracking);
-
   int slack = unused_property_fields();
   TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
   if (slack != 0) {
     // Resize the initial map and all maps in its transition tree.
     TransitionArray::TraverseTransitionTree(this, &ShrinkInstanceSize, &slack);
+  } else {
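+    // Nothing to shrink, but still mark all maps in the transition tree as
+    // finished with slack tracking.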
+    TransitionArray::TraverseTransitionTree(this, &StopSlackTracking, nullptr);
   }
 }
 
@@ -12820,10 +12840,22 @@
 // static
 void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
                        PrototypeOptimizationMode proto_mode) {
+  bool is_hidden = false;
   if (prototype->IsJSObject()) {
     Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
     JSObject::OptimizeAsPrototype(prototype_jsobj, proto_mode);
+
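+    // A prototype is hidden if its constructor was instantiated from a
+    // function template marked hidden_prototype, or if it is the global
+    // object.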
+    Object* maybe_constructor = prototype_jsobj->map()->GetConstructor();
+    if (maybe_constructor->IsJSFunction()) {
+      JSFunction* constructor = JSFunction::cast(maybe_constructor);
+      Object* data = constructor->shared()->function_data();
+      is_hidden = (data->IsFunctionTemplateInfo() &&
+                   FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
+                  prototype->IsJSGlobalObject();
+    }
   }
+  map->set_has_hidden_prototype(is_hidden);
+
   WriteBarrierMode wb_mode =
       prototype->IsNull() ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
   map->set_prototype(*prototype, wb_mode);
@@ -12923,7 +12955,8 @@
 
 void JSFunction::SetPrototype(Handle<JSFunction> function,
                               Handle<Object> value) {
-  DCHECK(function->IsConstructor());
+  DCHECK(function->IsConstructor() ||
+         IsGeneratorFunction(function->shared()->kind()));
   Handle<Object> construct_prototype = value;
 
   // If the value is not a JSReceiver, store the value in the map's
@@ -13011,7 +13044,6 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
-    case JS_ITERATOR_RESULT_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -13155,6 +13187,7 @@
 
       JSFunction::SetInitialMap(function, map, prototype);
       map->SetConstructor(*constructor);
+      map->set_construction_counter(Map::kNoSlackTracking);
       map->StartInobjectSlackTracking();
       return map;
     }
@@ -13268,6 +13301,22 @@
   return JSFunction::GetName(function);
 }
 
+void JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
+                         Handle<String> prefix) {
+  Isolate* isolate = function->GetIsolate();
+  Handle<String> function_name = Name::ToFunctionName(name).ToHandleChecked();
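+  // If a prefix is given (e.g. "get" or "set" for accessors), prepend it,
+  // separated from the name by a single space.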
+  if (prefix->length() > 0) {
+    IncrementalStringBuilder builder(isolate);
+    builder.AppendString(prefix);
+    builder.AppendCharacter(' ');
+    builder.AppendString(function_name);
+    function_name = builder.Finish().ToHandleChecked();
+  }
+  JSObject::DefinePropertyOrElementIgnoreAttributes(
+      function, isolate->factory()->name_string(), function_name,
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY))
+      .ToHandleChecked();
+}
 
 namespace {
 
@@ -13339,7 +13388,7 @@
     }
     if (shared_info->name_should_print_as_anonymous()) {
       builder.AppendCString("anonymous");
-    } else {
+    } else if (!shared_info->is_anonymous_expression()) {
       builder.AppendString(handle(String::cast(shared_info->name()), isolate));
     }
   }
@@ -13666,7 +13715,9 @@
   for (PrototypeIterator iter(isolate, this,
                               PrototypeIterator::START_AT_RECEIVER);
        !iter.IsAtEnd(); iter.Advance()) {
-    JSFunction* func = iter.GetCurrent<JSFunction>();
+    JSReceiver* current = iter.GetCurrent<JSReceiver>();
+    if (!current->IsJSFunction()) break;
+    JSFunction* func = JSFunction::cast(current);
     SharedFunctionInfo* shared = func->shared();
     expected_nof_properties += shared->expected_nof_properties();
     if (!IsSubclassConstructor(shared->kind())) {
@@ -13774,8 +13825,9 @@
   shared_info->set_function_token_position(lit->function_token_position());
   shared_info->set_start_position(lit->start_position());
   shared_info->set_end_position(lit->end_position());
-  shared_info->set_is_expression(lit->is_expression());
-  shared_info->set_is_anonymous(lit->is_anonymous());
+  shared_info->set_is_declaration(lit->is_declaration());
+  shared_info->set_is_named_expression(lit->is_named_expression());
+  shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
   shared_info->set_inferred_name(*lit->inferred_name());
   shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   shared_info->set_allows_lazy_compilation_without_context(
@@ -14064,12 +14116,12 @@
   Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
 }
 
-
-// Locate the source position which is closest to the address in the code. This
-// is using the source position information embedded in the relocation info.
+// Locate the source position which is closest to the code offset, using the
+// source position information embedded in the relocation info.
 // The position returned is relative to the beginning of the script where the
 // source for this function is found.
-int Code::SourcePosition(Address pc) {
+int Code::SourcePosition(int code_offset) {
+  Address pc = instruction_start() + code_offset;
   int distance = kMaxInt;
   int position = RelocInfo::kNoPosition;  // Initially no position found.
   // Run through all the relocation info to find the best matching source
@@ -14101,10 +14153,10 @@
 
 // Same as Code::SourcePosition above except it only looks for statement
 // positions.
-int Code::SourceStatementPosition(Address pc) {
+int Code::SourceStatementPosition(int code_offset) {
   // First find the position as close as possible using all position
   // information.
-  int position = SourcePosition(pc);
+  int position = SourcePosition(code_offset);
   // Now find the closest statement position before the position.
   int statement_position = 0;
   RelocIterator it(this, RelocInfo::kPositionMask);
@@ -14305,6 +14357,15 @@
   }
 }
 
+int AbstractCode::SourcePosition(int offset) {
+  return IsBytecodeArray() ? GetBytecodeArray()->SourcePosition(offset)
+                           : GetCode()->SourcePosition(offset);
+}
+
+int AbstractCode::SourceStatementPosition(int offset) {
+  return IsBytecodeArray() ? GetBytecodeArray()->SourceStatementPosition(offset)
+                           : GetCode()->SourceStatementPosition(offset);
+}
 
 void SharedFunctionInfo::ClearTypeFeedbackInfo() {
   feedback_vector()->ClearSlots(this);
@@ -14526,7 +14587,9 @@
   for (int i = 0; i < deopt_data->DeoptCount(); i++) {
     if (deopt_data->Pc(i)->value() == -1) continue;
     Address address = code_start_address + deopt_data->Pc(i)->value();
-    if (address == pc) return true;
+    if (address == pc && deopt_data->AstId(i) != BailoutId::None()) {
+      return true;
+    }
   }
   return false;
 }
@@ -14640,11 +14703,6 @@
           break;
         }
 
-        case Translation::JS_FRAME_FUNCTION: {
-          os << "{function}";
-          break;
-        }
-
         case Translation::COMPILED_STUB_FRAME: {
           Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
           os << "{kind=" << stub_kind << "}";
@@ -14786,10 +14844,10 @@
     int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
     int handler_offset = HandlerOffsetField::decode(handler_field);
     CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
-    int depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+    int data = Smi::cast(get(i + kRangeDataIndex))->value();
     os << "  (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
        << ")  ->  " << std::setw(4) << handler_offset
-       << " (prediction=" << prediction << ", depth=" << depth << ")\n";
+       << " (prediction=" << prediction << ", data=" << data << ")\n";
   }
 }
 
@@ -14997,6 +15055,34 @@
 }
 #endif  // ENABLE_DISASSEMBLER
 
+int BytecodeArray::SourcePosition(int offset) {
+  int last_position = 0;
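+  // Scan the position table in order and remember the last entry whose
+  // bytecode offset is at or before the requested offset.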
+  for (interpreter::SourcePositionTableIterator iterator(this);
+       !iterator.done() && iterator.bytecode_offset() <= offset;
+       iterator.Advance()) {
+    last_position = iterator.source_position();
+  }
+  return last_position;
+}
+
+int BytecodeArray::SourceStatementPosition(int offset) {
+  // First find the position as close as possible using all position
+  // information.
+  int position = SourcePosition(offset);
+  // Now find the closest statement position before the position.
+  int statement_position = 0;
+  interpreter::SourcePositionTableIterator iterator(this);
+  while (!iterator.done()) {
+    if (iterator.is_statement()) {
+      int p = iterator.source_position();
+      if (statement_position < p && p <= position) {
+        statement_position = p;
+      }
+    }
+    iterator.Advance();
+  }
+  return statement_position;
+}
 
 void BytecodeArray::Disassemble(std::ostream& os) {
   os << "Parameter count " << parameter_count() << "\n";
@@ -15005,12 +15091,23 @@
 
   const uint8_t* first_bytecode_address = GetFirstBytecodeAddress();
   int bytecode_size = 0;
+
+  interpreter::SourcePositionTableIterator source_positions(this);
+
   for (int i = 0; i < this->length(); i += bytecode_size) {
     const uint8_t* bytecode_start = &first_bytecode_address[i];
     interpreter::Bytecode bytecode =
         interpreter::Bytecodes::FromByte(bytecode_start[0]);
     bytecode_size = interpreter::Bytecodes::Size(bytecode);
 
+    if (!source_positions.done() && i == source_positions.bytecode_offset()) {
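+      // Annotate the bytecode with its source position: S> marks a
+      // statement position, E> an expression position.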
+      os << std::setw(5) << source_positions.source_position();
+      os << (source_positions.is_statement() ? " S> " : " E> ");
+      source_positions.Advance();
+    } else {
+      os << "         ";
+    }
+
     SNPrintF(buf, "%p", bytecode_start);
     os << buf.start() << " : ";
     interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
@@ -15033,13 +15130,29 @@
       SNPrintF(buf, " (%p)", bytecode_start + offset);
       os << buf.start();
     }
-    os << "\n";
+
+    os << std::endl;
   }
 
-  os << "Constant pool (size = " << constant_pool()->length() << ")\n";
-  constant_pool()->Print();
+  if (constant_pool()->length() > 0) {
+    os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+    constant_pool()->Print();
+  }
+
+#ifdef ENABLE_DISASSEMBLER
+  if (handler_table()->length() > 0) {
+    os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+    HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
+  }
+#endif
 }
 
+void BytecodeArray::CopyBytecodesTo(BytecodeArray* to) {
+  BytecodeArray* from = this;
+  DCHECK_EQ(from->length(), to->length());
+  CopyBytes(to->GetFirstBytecodeAddress(), from->GetFirstBytecodeAddress(),
+            from->length());
+}
 
 // static
 void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
@@ -15527,33 +15640,35 @@
       Execution::Call(isolate, trap, handler, arraysize(argv), argv),
       Nothing<bool>());
   bool bool_trap_result = trap_result->BooleanValue();
-  // 9. Let extensibleTarget be ? IsExtensible(target).
+  // 9. If booleanTrapResult is false, return false.
+  if (!bool_trap_result) {
+    RETURN_FAILURE(
+        isolate, should_throw,
+        NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+  }
+  // 10. Let extensibleTarget be ? IsExtensible(target).
   Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
   if (is_extensible.IsNothing()) return Nothing<bool>();
-  // 10. If extensibleTarget is true, return booleanTrapResult.
+  // 11. If extensibleTarget is true, return true.
   if (is_extensible.FromJust()) {
     if (bool_trap_result) return Just(true);
     RETURN_FAILURE(
         isolate, should_throw,
         NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
   }
-  // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+  // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
   Handle<Object> target_proto;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_proto,
-                                   Object::GetPrototype(isolate, target),
+                                   JSReceiver::GetPrototype(isolate, target),
                                    Nothing<bool>());
-  // 12. If booleanTrapResult is true and SameValue(V, targetProto) is false,
-  // throw a TypeError exception.
+  // 13. If SameValue(V, targetProto) is false, throw a TypeError exception.
   if (bool_trap_result && !value->SameValue(*target_proto)) {
     isolate->Throw(*isolate->factory()->NewTypeError(
         MessageTemplate::kProxySetPrototypeOfNonExtensible));
     return Nothing<bool>();
   }
-  // 13. Return booleanTrapResult.
-  if (bool_trap_result) return Just(true);
-  RETURN_FAILURE(
-      isolate, should_throw,
-      NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+  // 14. Return true.
+  return Just(true);
 }
 
 
@@ -15562,11 +15677,21 @@
                                    ShouldThrow should_throw) {
   Isolate* isolate = object->GetIsolate();
 
+  // Setting the prototype of an Array instance invalidates the species
+  // protector because it could change the constructor property of the
+  // instance, which could change the @@species constructor.
+  if (object->IsJSArray() && isolate->IsArraySpeciesLookupChainIntact()) {
+    isolate->CountUsage(
+        v8::Isolate::UseCounterFeature::kArrayInstanceProtoModified);
+    isolate->InvalidateArraySpeciesProtector();
+  }
+
   const bool observed = from_javascript && object->map()->is_observed();
   Handle<Object> old_value;
   if (observed) {
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
-                                     Object::GetPrototype(isolate, object),
+                                     JSReceiver::GetPrototype(isolate, object),
                                      Nothing<bool>());
   }
 
@@ -15577,7 +15702,7 @@
   if (result.FromJust() && observed) {
     Handle<Object> new_value;
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
-                                     Object::GetPrototype(isolate, object),
+                                     JSReceiver::GetPrototype(isolate, object),
                                      Nothing<bool>());
     if (!new_value->SameValue(*old_value)) {
       RETURN_ON_EXCEPTION_VALUE(
@@ -15633,8 +15758,10 @@
   if (from_javascript) {
     // Find the first object in the chain whose prototype object is not
     // hidden.
-    PrototypeIterator iter(isolate, real_receiver);
-    while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+    PrototypeIterator iter(isolate, real_receiver,
+                           PrototypeIterator::START_AT_PROTOTYPE,
+                           PrototypeIterator::END_AT_NON_HIDDEN);
+    while (!iter.IsAtEnd()) {
       // Casting to JSObject is fine because hidden prototypes are never
       // JSProxies.
       real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
@@ -15663,13 +15790,15 @@
   // Before we can set the prototype we need to be sure prototype cycles are
   // prevented.  It is sufficient to validate that the receiver is not in the
   // new prototype chain.
-  for (PrototypeIterator iter(isolate, *value,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
-    if (iter.GetCurrent<JSReceiver>() == *object) {
-      // Cycle detected.
-      RETURN_FAILURE(isolate, should_throw,
-                     NewTypeError(MessageTemplate::kCyclicProto));
+  if (value->IsJSReceiver()) {
+    for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
+                                PrototypeIterator::START_AT_RECEIVER);
+         !iter.IsAtEnd(); iter.Advance()) {
+      if (iter.GetCurrent<JSReceiver>() == *object) {
+        // Cycle detected.
+        RETURN_FAILURE(isolate, should_throw,
+                       NewTypeError(MessageTemplate::kCyclicProto));
+      }
     }
   }
 
@@ -15767,6 +15896,9 @@
   if (object->HasSloppyArgumentsElements()) {
     return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
   }
+  if (object->HasStringWrapperElements()) {
+    return FAST_STRING_WRAPPER_ELEMENTS;
+  }
   DCHECK(object->HasDictionaryElements());
   SeededNumberDictionary* dictionary = object->element_dictionary();
   ElementsKind kind = FAST_HOLEY_SMI_ELEMENTS;
@@ -15850,6 +15982,8 @@
   if (IsSloppyArgumentsElements(kind)) {
     elements = FixedArrayBase::cast(FixedArray::cast(elements)->get(1));
     dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+  } else if (IsStringWrapperElementsKind(kind)) {
+    dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
   }
 
   if (attributes != NONE) {
@@ -16037,7 +16171,8 @@
   {
     DisallowHeapAllocation no_allocation;
 
-    AllocationMemento* memento = heap->FindAllocationMemento(*object);
+    AllocationMemento* memento =
+        heap->FindAllocationMemento<Heap::kForRuntime>(*object);
     if (memento == NULL) return;
 
     // Walk through to the Allocation Site
@@ -16144,13 +16279,16 @@
     // Fall through.
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS:
       return FastHoleyElementsUsage(this, FixedArray::cast(store));
     case FAST_HOLEY_DOUBLE_ELEMENTS:
       if (elements()->length() == 0) return 0;
       return FastHoleyElementsUsage(this, FixedDoubleArray::cast(store));
 
     case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS:
     case DICTIONARY_ELEMENTS:
+    case NO_ELEMENTS:
 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                      \
     case TYPE##_ELEMENTS:                                                    \
 
@@ -16230,7 +16368,8 @@
   Handle<JSObject> holder = it->GetHolder<JSObject>();
   v8::Local<v8::Value> result;
   PropertyCallbackArguments args(isolate, interceptor->data(),
-                                 *it->GetReceiver(), *holder);
+                                 *it->GetReceiver(), *holder,
+                                 Object::DONT_THROW);
 
   if (it->IsElement()) {
     uint32_t index = it->index();
@@ -16421,7 +16560,7 @@
       }
       Name* key = descs->GetKey(i);
       if (key->FilterKey(filter)) continue;
-      keys->AddKey(key);
+      keys->AddKey(key, DO_NOT_CONVERT);
     }
   } else if (IsJSGlobalObject()) {
     GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
@@ -16450,21 +16589,6 @@
                                      KeyAccumulator* keys,
                                      PropertyFilter filter) {
   if (filter & SKIP_STRINGS) return;
-  uint32_t string_keys = 0;
-
-  // If this is a String wrapper, add the string indices first,
-  // as they're guaranteed to precede the elements in numerical order
-  // and ascending order is required by ECMA-262, 6th, 9.1.12.
-  if (object->IsJSValue()) {
-    Object* val = JSValue::cast(*object)->value();
-    if (val->IsString() && (filter & ONLY_ALL_CAN_READ) == 0) {
-      String* str = String::cast(val);
-      string_keys = str->length();
-      for (uint32_t i = 0; i < string_keys; i++) {
-        keys->AddKey(i);
-      }
-    }
-  }
   ElementsAccessor* accessor = object->GetElementsAccessor();
   accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
 }
@@ -16493,7 +16617,8 @@
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS: {
+    case FAST_HOLEY_ELEMENTS:
+    case FAST_STRING_WRAPPER_ELEMENTS: {
       int length = IsJSArray() ?
           Smi::cast(JSArray::cast(this)->length())->value() :
           FixedArray::cast(elements())->length();
@@ -16542,7 +16667,8 @@
       break;
     }
 
-    case DICTIONARY_ELEMENTS: {
+    case DICTIONARY_ELEMENTS:
+    case SLOW_STRING_WRAPPER_ELEMENTS: {
       if (storage != NULL) {
         element_dictionary()->CopyKeysTo(storage, counter, filter,
                                          SeededNumberDictionary::SORTED);
@@ -16592,6 +16718,8 @@
       }
       break;
     }
+    case NO_ELEMENTS:
+      break;
   }
 
   DCHECK(!storage || storage->length() == counter);
@@ -16604,8 +16732,8 @@
   if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
   if (object->IsNull()) return isolate->factory()->null_to_string();
 
-  Handle<JSReceiver> receiver;
-  CHECK(Object::ToObject(isolate, object).ToHandle(&receiver));
+  Handle<JSReceiver> receiver =
+      Object::ToObject(isolate, object).ToHandleChecked();
 
   Handle<String> tag;
   if (FLAG_harmony_tostring) {
@@ -17221,6 +17349,16 @@
       }
     }
   }
+  // Wipe deleted entries.
+  Heap* heap = GetHeap();
+  Object* the_hole = heap->the_hole_value();
+  Object* undefined = heap->undefined_value();
+  for (uint32_t current = 0; current < capacity; current++) {
+    if (get(EntryToIndex(current)) == the_hole) {
+      set(EntryToIndex(current), undefined);
+    }
+  }
+  SetNumberOfDeletedElements(0);
 }
 
 
@@ -17550,6 +17688,11 @@
     return handle(Smi::FromInt(-1), isolate);
   }
 
+  if (object->HasStringWrapperElements()) {
+    int len = String::cast(Handle<JSValue>::cast(object)->value())->length();
+    return handle(Smi::FromInt(len), isolate);
+  }
+
   if (object->HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
@@ -18489,7 +18632,7 @@
 
   for (int i = 0; i < array_size; i++) {
     int index = Smi::cast(array->get(i))->value();
-    keys->AddKey(dictionary->KeyAt(index));
+    keys->AddKey(dictionary->KeyAt(index), DO_NOT_CONVERT);
   }
 }
 
@@ -18576,6 +18719,12 @@
     return table;
   }
 
+  // Rehash if more than 33% of the entries are deleted entries.
+  // TODO(jochen): Consider shrinking the fixed array in place.
+  if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
+    table->Rehash(isolate->factory()->undefined_value());
+  }
+
   // Check whether the hash table should be extended.
   table = EnsureCapacity(table, 1, key);
   table->AddEntry(table->FindInsertionEntry(hash), *key, *value);
@@ -19040,35 +19189,31 @@
   return was_present;
 }
 
-
-// Check if there is a break point at this code position.
-bool DebugInfo::HasBreakPoint(int code_position) {
-  // Get the break point info object for this code position.
-  Object* break_point_info = GetBreakPointInfo(code_position);
+// Check if there is a break point at this code offset.
+bool DebugInfo::HasBreakPoint(int code_offset) {
+  // Get the break point info object for this code offset.
+  Object* break_point_info = GetBreakPointInfo(code_offset);
 
   // If there is no break point info object or no break points in the break
-  // point info object there is no break point at this code position.
+  // point info object there is no break point at this code offset.
   if (break_point_info->IsUndefined()) return false;
   return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
 }
 
-
-// Get the break point info object for this code position.
-Object* DebugInfo::GetBreakPointInfo(int code_position) {
-  // Find the index of the break point info object for this code position.
-  int index = GetBreakPointInfoIndex(code_position);
+// Get the break point info object for this code offset.
+Object* DebugInfo::GetBreakPointInfo(int code_offset) {
+  // Find the index of the break point info object for this code offset.
+  int index = GetBreakPointInfoIndex(code_offset);
 
   // Return the break point info object if any.
   if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
   return BreakPointInfo::cast(break_points()->get(index));
 }
 
-
-// Clear a break point at the specified code position.
-void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
-                                int code_position,
+// Clear a break point at the specified code offset.
+void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
                                 Handle<Object> break_point_object) {
-  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
                                   debug_info->GetIsolate());
   if (break_point_info->IsUndefined()) return;
   BreakPointInfo::ClearBreakPoint(
@@ -19076,14 +19221,11 @@
       break_point_object);
 }
 
-
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
-                              int code_position,
-                              int source_position,
-                              int statement_position,
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
+                              int source_position, int statement_position,
                               Handle<Object> break_point_object) {
   Isolate* isolate = debug_info->GetIsolate();
-  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+  Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
                                   isolate);
   if (!break_point_info->IsUndefined()) {
     BreakPointInfo::SetBreakPoint(
@@ -19092,7 +19234,7 @@
     return;
   }
 
-  // Adding a new break point for a code position which did not have any
+  // Adding a new break point for a code offset which did not have any
   // break points before. Try to find a free slot.
   int index = kNoBreakPointInfo;
   for (int i = 0; i < debug_info->break_points()->length(); i++) {
@@ -19121,7 +19263,7 @@
   // Allocate new BreakPointInfo object and set the break point.
   Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
       isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
-  new_break_point_info->set_code_position(code_position);
+  new_break_point_info->set_code_offset(code_offset);
   new_break_point_info->set_source_position(source_position);
   new_break_point_info->set_statement_position(statement_position);
   new_break_point_info->set_break_point_objects(
@@ -19130,10 +19272,9 @@
   debug_info->break_points()->set(index, *new_break_point_info);
 }
 
-
-// Get the break point objects for a code position.
-Handle<Object> DebugInfo::GetBreakPointObjects(int code_position) {
-  Object* break_point_info = GetBreakPointInfo(code_position);
+// Get the break point objects for a code offset.
+Handle<Object> DebugInfo::GetBreakPointObjects(int code_offset) {
+  Object* break_point_info = GetBreakPointInfo(code_offset);
   if (break_point_info->IsUndefined()) {
     return GetIsolate()->factory()->undefined_value();
   }
@@ -19179,13 +19320,13 @@
 
 // Find the index of the break point info object for the specified code
 // position.
-int DebugInfo::GetBreakPointInfoIndex(int code_position) {
+int DebugInfo::GetBreakPointInfoIndex(int code_offset) {
   if (break_points()->IsUndefined()) return kNoBreakPointInfo;
   for (int i = 0; i < break_points()->length(); i++) {
     if (!break_points()->get(i)->IsUndefined()) {
       BreakPointInfo* break_point_info =
           BreakPointInfo::cast(break_points()->get(i));
-      if (break_point_info->code_position() == code_position) {
+      if (break_point_info->code_offset() == code_offset) {
         return i;
       }
     }
diff --git a/src/objects.h b/src/objects.h
index c55c5c9..61c6e5e 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -7,7 +7,6 @@
 
 #include <iosfwd>
 
-#include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/bailout-reason.h"
 #include "src/base/bits.h"
@@ -127,6 +126,7 @@
 //     - Cell
 //     - PropertyCell
 //     - Code
+//     - AbstractCode, a wrapper around Code or BytecodeArray
 //     - Map
 //     - Oddball
 //     - Foreign
@@ -134,7 +134,6 @@
 //     - Struct
 //       - Box
 //       - AccessorInfo
-//         - ExecutableAccessorInfo
 //       - AccessorPair
 //       - AccessCheckInfo
 //       - InterceptorInfo
@@ -384,9 +383,7 @@
                                                                 \
   V(FILLER_TYPE)                                                \
                                                                 \
-  V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE)                          \
-  V(DECLARED_ACCESSOR_INFO_TYPE)                                \
-  V(EXECUTABLE_ACCESSOR_INFO_TYPE)                              \
+  V(ACCESSOR_INFO_TYPE)                                         \
   V(ACCESSOR_PAIR_TYPE)                                         \
   V(ACCESS_CHECK_INFO_TYPE)                                     \
   V(INTERCEPTOR_INFO_TYPE)                                      \
@@ -431,7 +428,6 @@
   V(JS_MAP_TYPE)                                                \
   V(JS_SET_ITERATOR_TYPE)                                       \
   V(JS_MAP_ITERATOR_TYPE)                                       \
-  V(JS_ITERATOR_RESULT_TYPE)                                    \
   V(JS_WEAK_MAP_TYPE)                                           \
   V(JS_WEAK_SET_TYPE)                                           \
   V(JS_PROMISE_TYPE)                                            \
@@ -505,8 +501,7 @@
 // manually.
 #define STRUCT_LIST(V)                                                       \
   V(BOX, Box, box)                                                           \
-  V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo,                        \
-    executable_accessor_info)                                                \
+  V(ACCESSOR_INFO, AccessorInfo, accessor_info)                              \
   V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                              \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                   \
   V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info)                     \
@@ -676,9 +671,7 @@
   FILLER_TYPE,  // LAST_DATA_TYPE
 
   // Structs.
-  DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
-  DECLARED_ACCESSOR_INFO_TYPE,
-  EXECUTABLE_ACCESSOR_INFO_TYPE,
+  ACCESSOR_INFO_TYPE,
   ACCESSOR_PAIR_TYPE,
   ACCESS_CHECK_INFO_TYPE,
   INTERCEPTOR_INFO_TYPE,
@@ -728,7 +721,6 @@
   JS_MAP_TYPE,
   JS_SET_ITERATOR_TYPE,
   JS_MAP_ITERATOR_TYPE,
-  JS_ITERATOR_RESULT_TYPE,
   JS_WEAK_MAP_TYPE,
   JS_WEAK_SET_TYPE,
   JS_PROMISE_TYPE,
@@ -846,6 +838,7 @@
 class LayoutDescriptor;
 class LiteralsArray;
 class LookupIterator;
+class FieldType;
 class ObjectHashTable;
 class ObjectVisitor;
 class PropertyCell;
@@ -858,11 +851,6 @@
 class WeakCell;
 class TransitionArray;
 
-// We cannot just say "class HeapType;" if it is created from a template... =8-?
-template<class> class TypeImpl;
-struct HeapTypeConfig;
-typedef TypeImpl<HeapTypeConfig> HeapType;
-
 
 // A template-ized version of the IsXXX functions.
 template <class C> inline bool Is(Object* obj);
@@ -879,10 +867,11 @@
 #define DECLARE_PRINTER(Name)
 #endif
 
-
 #define OBJECT_TYPE_LIST(V) \
   V(Smi)                    \
+  V(LayoutDescriptor)       \
   V(HeapObject)             \
+  V(Primitive)              \
   V(Number)
 
 #define HEAP_OBJECT_TYPE_LIST(V)   \
@@ -931,7 +920,6 @@
   V(JSContextExtensionObject)      \
   V(JSGeneratorObject)             \
   V(JSModule)                      \
-  V(LayoutDescriptor)              \
   V(Map)                           \
   V(DescriptorArray)               \
   V(TransitionArray)               \
@@ -953,6 +941,7 @@
   V(JSBoundFunction)               \
   V(JSFunction)                    \
   V(Code)                          \
+  V(AbstractCode)                  \
   V(Oddball)                       \
   V(SharedFunctionInfo)            \
   V(JSValue)                       \
@@ -971,7 +960,6 @@
   V(JSMap)                         \
   V(JSSetIterator)                 \
   V(JSMapIterator)                 \
-  V(JSIteratorResult)              \
   V(JSWeakCollection)              \
   V(JSWeakMap)                     \
   V(JSWeakSet)                     \
@@ -984,11 +972,18 @@
   V(CodeCacheHashTable)            \
   V(PolymorphicCodeCacheHashTable) \
   V(MapCache)                      \
-  V(Primitive)                     \
   V(JSGlobalObject)                \
   V(JSGlobalProxy)                 \
   V(UndetectableObject)            \
   V(AccessCheckNeeded)             \
+  V(Callable)                      \
+  V(Function)                      \
+  V(Constructor)                   \
+  V(TemplateInfo)                  \
+  V(Filler)                        \
+  V(FixedArrayBase)                \
+  V(External)                      \
+  V(Struct)                        \
   V(Cell)                          \
   V(PropertyCell)                  \
   V(WeakCell)                      \
@@ -996,6 +991,16 @@
   V(WeakHashTable)                 \
   V(OrderedHashTable)
 
+#define ODDBALL_LIST(V) \
+  V(Undefined)          \
+  V(Null)               \
+  V(TheHole)            \
+  V(Exception)          \
+  V(Uninitialized)      \
+  V(True)               \
+  V(False)              \
+  V(ArgumentsMarker)
+
 // The element types selection for CreateListFromArrayLike.
 enum class ElementTypes { kAll, kStringAndSymbol };
 
@@ -1013,6 +1018,7 @@
 #define IS_TYPE_FUNCTION_DECL(type_)  INLINE(bool Is##type_() const);
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+  ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
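To make the macro plumbing above concrete, here is the expansion it produces; this is a reading aid, not new API. ODDBALL_LIST(IS_TYPE_FUNCTION_DECL) stamps out one inline predicate per oddball:

    // IS_TYPE_FUNCTION_DECL(Undefined) expands to:
    INLINE(bool IsUndefined() const);
    // ...and the list likewise yields IsNull(), IsTheHole(), IsException(),
    // IsUninitialized(), IsTrue(), IsFalse() and IsArgumentsMarker().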
 
   // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
@@ -1041,11 +1047,6 @@
 
 #define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
 
-  INLINE(bool IsFixedArrayBase() const);
-  INLINE(bool IsExternal() const);
-  INLINE(bool IsAccessorInfo() const);
-
-  INLINE(bool IsStruct() const);
 #define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
   INLINE(bool Is##Name() const);
   STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1054,16 +1055,6 @@
   // ES6, section 7.2.2 IsArray.  NOT to be confused with %_IsArray.
   MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object);
 
-  // Test for JSBoundFunction or JSFunction.
-  INLINE(bool IsFunction() const);
-
-  // ES6, section 7.2.3 IsCallable.
-  INLINE(bool IsCallable() const);
-
-  // ES6, section 7.2.4 IsConstructor.
-  INLINE(bool IsConstructor() const);
-
-  INLINE(bool IsTemplateInfo()) const;
   INLINE(bool IsNameDictionary() const);
   INLINE(bool IsGlobalDictionary() const);
   INLINE(bool IsSeededNumberDictionary() const);
@@ -1072,19 +1063,6 @@
   INLINE(bool IsOrderedHashMap() const);
   static bool IsPromise(Handle<Object> object);
 
-  // Oddball testing.
-  INLINE(bool IsUndefined() const);
-  INLINE(bool IsNull() const);
-  INLINE(bool IsTheHole() const);
-  INLINE(bool IsException() const);
-  INLINE(bool IsUninitialized() const);
-  INLINE(bool IsTrue() const);
-  INLINE(bool IsFalse() const);
-  INLINE(bool IsArgumentsMarker() const);
-
-  // Filler objects (fillers and free space objects).
-  INLINE(bool IsFiller() const);
-
   // Extract the number.
   inline double Number() const;
   INLINE(bool IsNaN() const);
@@ -1105,7 +1083,8 @@
 
   inline bool FilterKey(PropertyFilter filter);
 
-  Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
+  Handle<FieldType> OptimalType(Isolate* isolate,
+                                Representation representation);
 
   inline static Handle<Object> NewStorageFor(Isolate* isolate,
                                              Handle<Object> object,
@@ -1124,8 +1103,8 @@
   bool BooleanValue();                                      // ECMA-262 9.2.
 
   // ES6 section 7.2.11 Abstract Relational Comparison
-  MUST_USE_RESULT static Maybe<ComparisonResult> Compare(
-      Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static Maybe<ComparisonResult> Compare(Handle<Object> x,
+                                                         Handle<Object> y);
 
   // ES6 section 7.2.12 Abstract Equality Comparison
   MUST_USE_RESULT static Maybe<bool> Equals(Handle<Object> x, Handle<Object> y);
@@ -1135,8 +1114,8 @@
 
   // Convert to a JSObject if needed.
   // native_context is used when creating wrapper object.
-  static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
-                                                 Handle<Object> object);
+  MUST_USE_RESULT static inline MaybeHandle<JSReceiver> ToObject(
+      Isolate* isolate, Handle<Object> object);
   MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
       Isolate* isolate, Handle<Object> object, Handle<Context> context);
 
@@ -1186,58 +1165,56 @@
   static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
 
   // ES6 section 12.6 Multiplicative Operators
-  MUST_USE_RESULT static MaybeHandle<Object> Multiply(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> Divide(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> Modulus(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static MaybeHandle<Object> Multiply(Isolate* isolate,
+                                                      Handle<Object> lhs,
+                                                      Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> Divide(Isolate* isolate,
+                                                    Handle<Object> lhs,
+                                                    Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> Modulus(Isolate* isolate,
+                                                     Handle<Object> lhs,
+                                                     Handle<Object> rhs);
 
   // ES6 section 12.7 Additive Operators
-  MUST_USE_RESULT static MaybeHandle<Object> Add(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> Subtract(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
+                                                 Handle<Object> lhs,
+                                                 Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> Subtract(Isolate* isolate,
+                                                      Handle<Object> lhs,
+                                                      Handle<Object> rhs);
 
   // ES6 section 12.8 Bitwise Shift Operators
-  MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(Isolate* isolate,
+                                                       Handle<Object> lhs,
+                                                       Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(Isolate* isolate,
+                                                        Handle<Object> lhs,
+                                                        Handle<Object> rhs);
   MUST_USE_RESULT static MaybeHandle<Object> ShiftRightLogical(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
+      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs);
 
   // ES6 section 12.9 Relational Operators
-  MUST_USE_RESULT static inline Maybe<bool> GreaterThan(
-      Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
+                                                        Handle<Object> y);
   MUST_USE_RESULT static inline Maybe<bool> GreaterThanOrEqual(
-      Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static inline Maybe<bool> LessThan(
-      Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(
-      Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+      Handle<Object> x, Handle<Object> y);
+  MUST_USE_RESULT static inline Maybe<bool> LessThan(Handle<Object> x,
+                                                     Handle<Object> y);
+  MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(Handle<Object> x,
+                                                            Handle<Object> y);
 
   // ES6 section 12.11 Binary Bitwise Operators
-  MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
-  MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(
-      Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
-      Strength strength = Strength::WEAK);
+  MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(Isolate* isolate,
+                                                        Handle<Object> lhs,
+                                                        Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(Isolate* isolate,
+                                                       Handle<Object> lhs,
+                                                       Handle<Object> rhs);
+  MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(Isolate* isolate,
+                                                        Handle<Object> lhs,
+                                                        Handle<Object> rhs);
 
-  MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
-      LookupIterator* it, LanguageMode language_mode = SLOPPY);
+  MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
 
   // ES6 [[Set]] (when passed DONT_THROW)
   // Invariants for this and related functions (unless stated otherwise):
@@ -1260,10 +1237,9 @@
       StoreFromKeyed store_mode);
 
   MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
-      LookupIterator* it, LanguageMode language_mode);
+      LookupIterator* it);
   MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
-      Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
-      LanguageMode language_mode);
+      Isolate* isolate, Handle<Object> receiver, Handle<Object> name);
   MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
       Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
       Handle<Object> value, ShouldThrow should_throw);
@@ -1281,20 +1257,16 @@
       LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
       ShouldThrow should_throw, StoreFromKeyed store_mode);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
-      Handle<Object> object, Handle<Name> name,
-      LanguageMode language_mode = SLOPPY);
+      Handle<Object> object, Handle<Name> name);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
-      Handle<JSReceiver> holder, Handle<Name> name, Handle<Object> receiver,
-      LanguageMode language_mode = SLOPPY);
+      Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
-      Isolate* isolate, Handle<Object> object, const char* key,
-      LanguageMode language_mode = SLOPPY);
+      Isolate* isolate, Handle<Object> object, const char* key);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
-      Handle<Object> object, Handle<Name> name,
-      LanguageMode language_mode = SLOPPY);
+      Handle<Object> object, Handle<Name> name);
 
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
-      LookupIterator* it, LanguageMode language_mode);
+      LookupIterator* it);
   MUST_USE_RESULT static Maybe<bool> SetPropertyWithAccessor(
       LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
 
@@ -1306,21 +1278,12 @@
       ShouldThrow should_throw);
 
   MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
-      Isolate* isolate, Handle<Object> object, uint32_t index,
-      LanguageMode language_mode = SLOPPY);
+      Isolate* isolate, Handle<Object> object, uint32_t index);
 
   MUST_USE_RESULT static inline MaybeHandle<Object> SetElement(
       Isolate* isolate, Handle<Object> object, uint32_t index,
       Handle<Object> value, LanguageMode language_mode);
 
-  // Get the first non-hidden prototype.
-  static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
-                                                 Handle<Object> receiver);
-
-  MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(Isolate* isolate,
-                                                         Handle<Object> object,
-                                                         Handle<Object> proto);
-
   // Returns the permanent hash code associated with this object. May return
   // undefined if not yet created.
   Object* GetHash();
@@ -1359,10 +1322,6 @@
   // allow kMaxUInt32.
   inline bool ToArrayIndex(uint32_t* index);
 
-  // Returns true if this is a JSValue containing a string and the index is
-  // < the length of the string.  Used to implement [] on strings.
-  inline bool IsStringObjectWithCharacterAt(uint32_t index);
-
   DECLARE_VERIFIER(Object)
 #ifdef VERIFY_HEAP
   // Verify a pointer is a valid object pointer.
@@ -1401,7 +1360,7 @@
 
  private:
   friend class LookupIterator;
-  friend class PrototypeIterator;
+  friend class StringStream;
 
   // Return the map of the root of object's prototype chain.
   Map* GetRootMap(Isolate* isolate);
@@ -1555,6 +1514,15 @@
   // Convenience method to get current isolate.
   inline Isolate* GetIsolate() const;
 
+#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
+  HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+  ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
+  INLINE(bool Is##Name() const);
+  STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
+#undef DECLARE_STRUCT_PREDICATE
+
   // Converts an address to a HeapObject pointer.
   static inline HeapObject* FromAddress(Address address) {
     DCHECK_TAG_ALIGNED(address);
@@ -1797,6 +1765,7 @@
 
 enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
 
+enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
 
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
@@ -1826,6 +1795,13 @@
 
   static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
 
+  // Get the first non-hidden prototype.
+  static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
+                                                 Handle<JSReceiver> receiver);
+
+  MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
+      Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
+
   // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
   MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
   MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -1958,13 +1934,10 @@
   inline static Handle<Smi> GetOrCreateIdentityHash(
       Handle<JSReceiver> object);
 
-  enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
-
   // ES6 [[OwnPropertyKeys]] (modulo return type)
   MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
       Handle<JSReceiver> object) {
-    return GetKeys(object, JSReceiver::OWN_ONLY, ALL_PROPERTIES,
-                   CONVERT_TO_STRING);
+    return GetKeys(object, OWN_ONLY, ALL_PROPERTIES, CONVERT_TO_STRING);
   }
 
   // Computes the enumerable keys for a JSObject. Used for implementing
@@ -1973,6 +1946,12 @@
       Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
       GetKeysConversion keys_conversion = KEEP_NUMBERS);
 
+  MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
+      Handle<JSReceiver> object, PropertyFilter filter);
+
+  MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
+      Handle<JSReceiver> object, PropertyFilter filter);
+
   // Layout description.
   static const int kPropertiesOffset = HeapObject::kHeaderSize;
   static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
@@ -2038,6 +2017,7 @@
   // ElementsKind.
   inline bool HasFastHoleyElements();
   inline bool HasSloppyArgumentsElements();
+  inline bool HasStringWrapperElements();
   inline bool HasDictionaryElements();
 
   inline bool HasFixedTypedArrayElements();
@@ -2055,6 +2035,8 @@
 
   inline bool HasFastArgumentsElements();
   inline bool HasSlowArgumentsElements();
+  inline bool HasFastStringWrapperElements();
+  inline bool HasSlowStringWrapperElements();
   inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Requires: HasFastElements().
@@ -2073,38 +2055,37 @@
                                                    uint32_t limit);
 
   MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
-      LookupIterator* it, Handle<Object> value);
+      LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
 
-  // SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
-  // grant an exemption to ExecutableAccessor callbacks in some cases.
-  enum ExecutableAccessorInfoHandling { DEFAULT_HANDLING, DONT_FORCE_FIELD };
+  // The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
+  // AccessorInfo objects to data fields. We allow FORCE_FIELD as an exception
+  // to the default behavior that calls the setter.
+  enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
 
   MUST_USE_RESULT static MaybeHandle<Object> DefineOwnPropertyIgnoreAttributes(
       LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
-      ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+      AccessorInfoHandling handling = DONT_FORCE_FIELD);
 
   MUST_USE_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
       LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
       ShouldThrow should_throw,
-      ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+      AccessorInfoHandling handling = DONT_FORCE_FIELD);
 
   MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
       Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
-      PropertyAttributes attributes,
-      ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+      PropertyAttributes attributes);
 
   MUST_USE_RESULT static MaybeHandle<Object> SetOwnElementIgnoreAttributes(
       Handle<JSObject> object, uint32_t index, Handle<Object> value,
-      PropertyAttributes attributes,
-      ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+      PropertyAttributes attributes);
 
   // Equivalent to one of the above depending on whether |name| can be converted
   // to an array index.
   MUST_USE_RESULT static MaybeHandle<Object>
-  DefinePropertyOrElementIgnoreAttributes(
-      Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
-      PropertyAttributes attributes = NONE,
-      ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+  DefinePropertyOrElementIgnoreAttributes(Handle<JSObject> object,
+                                          Handle<Name> name,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes = NONE);
 
   // Adds or reconfigures a property to attributes NONE. It will fail when it
   // cannot.
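A hedged call-site sketch of the renamed handling enum, using only the declarations above; the LookupIterator it and the value handle are assumptions:

    // Default (DONT_FORCE_FIELD): an AccessorInfo property keeps its
    // accessor and the setter is invoked.
    MaybeHandle<Object> result =
        JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE);
    // API exception: force the accessor to be converted to a data field.
    MaybeHandle<Object> forced =
        JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE,
                                                    JSObject::FORCE_FIELD);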
@@ -2204,12 +2185,11 @@
 
   // Accessors for hidden properties object.
   //
-  // Hidden properties are not own properties of the object itself.
-  // Instead they are stored in an auxiliary structure kept as an own
-  // property with a special name Heap::hidden_string(). But if the
-  // receiver is a JSGlobalProxy then the auxiliary object is a property
-  // of its prototype, and if it's a detached proxy, then you can't have
-  // hidden properties.
+  // Hidden properties are not own properties of the object itself.  Instead
+  // they are stored in an auxiliary structure kept as an own property with a
+  // special name Heap::hidden_properties_symbol(). But if the receiver is a
+  // JSGlobalProxy then the auxiliary object is a property of its prototype, and
+  // if it's a detached proxy, then you can't have hidden properties.
 
   // Sets a hidden property on this object. Returns this object if successful,
   // undefined if called on a detached proxy.
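A sketch of the hidden-property contract described above; the SetHiddenProperty entry point and its handles are assumptions here, chosen to match the behavior the comment promises:

    // Stores key/value in the auxiliary structure kept under
    // Heap::hidden_properties_symbol() (or on the prototype for a
    // JSGlobalProxy).
    Handle<Object> result = JSObject::SetHiddenProperty(object, key, value);
    // result is the object itself on success, undefined if the receiver
    // is a detached proxy.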
@@ -2307,8 +2287,7 @@
                                     KeyAccumulator* keys,
                                     PropertyFilter filter);
 
-  static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
-                                                bool cache_result);
+  static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
 
   // Returns a new map with all transitions dropped from the object's current
   // map and the ElementsKind set.
@@ -2355,6 +2334,8 @@
   inline void FastPropertyAtPut(FieldIndex index, Object* value);
   inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
   inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
+  inline void WriteToField(int descriptor, PropertyDetails details,
+                           Object* value);
   inline void WriteToField(int descriptor, Object* value);
 
   // Access to in object properties.
@@ -2397,6 +2378,10 @@
       Handle<JSObject> object,
       AllocationSiteUsageContext* site_context,
       DeepCopyHints hints = kNoHints);
+  // Deep copies the given object with special handling for JSFunctions,
+  // which 1) must be API functions and 2) are not copied but left as is.
+  MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopyApiBoilerplate(
+      Handle<JSObject> object);
   MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
       Handle<JSObject> object,
       AllocationSiteCreationContext* site_context);
@@ -2505,11 +2490,6 @@
   friend class JSReceiver;
   friend class Object;
 
-  static void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map);
-  static void MigrateFastToSlow(Handle<JSObject> object,
-                                Handle<Map> new_map,
-                                int expected_additional_properties);
-
   // Used from Object::GetProperty().
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
       LookupIterator* it);
@@ -2524,7 +2504,7 @@
                               PropertyAttributes attributes);
 
   MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
-      LookupIterator* it);
+      LookupIterator* it, ShouldThrow should_throw);
 
   bool ReferencesObjectFromElements(FixedArray* elements,
                                     ElementsKind kind,
@@ -2566,6 +2546,111 @@
 };
 
 
+// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
+// map. This initial map adds in-object properties for "get", "set",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular accessor properties.
+class JSAccessorPropertyDescriptor: public JSObject {
+ public:
+  // Offsets of object fields.
+  static const int kGetOffset = JSObject::kHeaderSize;
+  static const int kSetOffset = kGetOffset + kPointerSize;
+  static const int kEnumerableOffset = kSetOffset + kPointerSize;
+  static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+  static const int kSize = kConfigurableOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kGetIndex = 0;
+  static const int kSetIndex = 1;
+  static const int kEnumerableIndex = 2;
+  static const int kConfigurableIndex = 3;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSAccessorPropertyDescriptor);
+};
+
+
+// JSDataPropertyDescriptor is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "value", "writable",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular data properties.
+class JSDataPropertyDescriptor: public JSObject {
+ public:
+  // Offsets of object fields.
+  static const int kValueOffset = JSObject::kHeaderSize;
+  static const int kWritableOffset = kValueOffset + kPointerSize;
+  static const int kEnumerableOffset = kWritableOffset + kPointerSize;
+  static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+  static const int kSize = kConfigurableOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kValueIndex = 0;
+  static const int kWritableIndex = 1;
+  static const int kEnumerableIndex = 2;
+  static const int kConfigurableIndex = 3;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataPropertyDescriptor);
+};
+
+
+// JSIteratorResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "done" and "value",
+// as specified by ES6 section 25.1.1.3, The IteratorResult Interface.
+class JSIteratorResult: public JSObject {
+ public:
+  // Offsets of object fields.
+  static const int kValueOffset = JSObject::kHeaderSize;
+  static const int kDoneOffset = kValueOffset + kPointerSize;
+  static const int kSize = kDoneOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kValueIndex = 0;
+  static const int kDoneIndex = 1;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
+};
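A worked layout check for the descriptor-style classes above, assuming a 64-bit build (kPointerSize == 8); the same header-plus-pointer arithmetic applies to JSAccessorPropertyDescriptor and JSDataPropertyDescriptor:

    // With H = JSObject::kHeaderSize:
    //   kValueOffset = H        (in-object property index 0)
    //   kDoneOffset  = H + 8    (in-object property index 1)
    //   kSize        = H + 16   (exactly the two in-object slots)
    static_assert(JSIteratorResult::kDoneOffset ==
                      JSIteratorResult::kValueOffset + kPointerSize,
                  "value and done are adjacent in-object slots");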
+
+
+// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
+class JSArgumentsObject: public JSObject {
+ public:
+  // Offsets of object fields.
+  static const int kLengthOffset = JSObject::kHeaderSize;
+  static const int kHeaderSize = kLengthOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kLengthIndex = 0;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+};
+
+
+// JSSloppyArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds in-object properties for "length" and "callee".
+class JSSloppyArgumentsObject: public JSArgumentsObject {
+ public:
+  // Offsets of object fields.
+  static const int kCalleeOffset = JSArgumentsObject::kHeaderSize;
+  static const int kSize = kCalleeOffset + kPointerSize;
+  // Indices of in-object properties.
+  static const int kCalleeIndex = 1;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
+};
+
+
+// JSStrictArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds an in-object property for "length".
+class JSStrictArgumentsObject: public JSArgumentsObject {
+ public:
+  // Offsets of object fields.
+  static const int kSize = JSArgumentsObject::kHeaderSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
+};
+
+
 // Common superclass for FixedArrays that allow implementations to share
 // common accessors and some code paths.
 class FixedArrayBase: public HeapObject {
@@ -2596,7 +2681,8 @@
  public:
   // Setter and getter for elements.
   inline Object* get(int index) const;
-  static inline Handle<Object> get(Handle<FixedArray> array, int index);
+  static inline Handle<Object> get(FixedArray* array, int index,
+                                   Isolate* isolate);
   // Setter that uses write barrier.
   inline void set(int index, Object* value);
   inline bool is_the_hole(int index);
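The getter above now takes a raw FixedArray* plus an explicit isolate instead of a handle; a minimal call-site sketch (array, index and isolate are assumed to be in scope):

    // Callers that already hold a handle dereference it; callers with a
    // raw pointer no longer need to materialize a handle first.
    Handle<Object> element = FixedArray::get(*array, index, isolate);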
@@ -2683,7 +2769,8 @@
   // Setter and getter for elements.
   inline double get_scalar(int index);
   inline uint64_t get_representation(int index);
-  static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
+  static inline Handle<Object> get(FixedDoubleArray* array, int index,
+                                   Isolate* isolate);
   inline void set(int index, double value);
   inline void set_the_hole(int index);
 
@@ -2864,8 +2951,6 @@
                            Isolate* isolate, Handle<FixedArray> new_cache,
                            Handle<FixedArray> new_index_cache);
 
-  bool CanHoldValue(int descriptor, Object* value);
-
   // Accessors for fetching instance descriptor at descriptor number.
   inline Name* GetKey(int descriptor_number);
   inline Object** GetKeySlot(int descriptor_number);
@@ -2878,7 +2963,7 @@
   inline PropertyDetails GetDetails(int descriptor_number);
   inline PropertyType GetType(int descriptor_number);
   inline int GetFieldIndex(int descriptor_number);
-  inline HeapType* GetFieldType(int descriptor_number);
+  FieldType* GetFieldType(int descriptor_number);
   inline Object* GetConstant(int descriptor_number);
   inline Object* GetCallbacksObject(int descriptor_number);
   inline AccessorDescriptor* GetCallbacks(int descriptor_number);
@@ -2917,15 +3002,15 @@
 
   // As the above, but uses DescriptorLookupCache and updates it when
   // necessary.
-  INLINE(int SearchWithCache(Name* name, Map* map));
+  INLINE(int SearchWithCache(Isolate* isolate, Name* name, Map* map));
 
   bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
 
   // Allocates a DescriptorArray, but returns the singleton
   // empty descriptor array object if number_of_descriptors is 0.
-  static Handle<DescriptorArray> Allocate(Isolate* isolate,
-                                          int number_of_descriptors,
-                                          int slack = 0);
+  static Handle<DescriptorArray> Allocate(
+      Isolate* isolate, int number_of_descriptors, int slack,
+      PretenureFlag pretenure = NOT_TENURED);
 
   DECLARE_CAST(DescriptorArray)
 
@@ -4086,10 +4171,6 @@
   // exposed to the user in a debugger.
   bool LocalIsSynthetic(int var);
 
-  String* StrongModeFreeVariableName(int var);
-  int StrongModeFreeVariableStartPosition(int var);
-  int StrongModeFreeVariableEndPosition(int var);
-
   // Lookup support for serialized scope info. Returns the
   // stack slot index for a given slot name if the slot is
   // present; otherwise returns a value < 0. The name must be an internalized
@@ -4159,8 +4240,7 @@
   V(ParameterCount)                          \
   V(StackLocalCount)                         \
   V(ContextLocalCount)                       \
-  V(ContextGlobalCount)                      \
-  V(StrongModeFreeVariableCount)
+  V(ContextGlobalCount)
 
 #define FIELD_ACCESSORS(name)       \
   inline void Set##name(int value); \
@@ -4202,15 +4282,10 @@
   //    the context locals in ContextLocalNameEntries. One slot is used per
   //    context local, so in total this part occupies ContextLocalCount()
   //    slots in the array.
-  // 6. StrongModeFreeVariableNameEntries:
-  //    Stores the names of strong mode free variables.
-  // 7. StrongModeFreeVariablePositionEntries:
-  //    Stores the locations (start and end position) of strong mode free
-  //    variables.
-  // 8. RecieverEntryIndex:
+  // 6. ReceiverEntryIndex:
   //    If the scope binds a "this" value, one slot is reserved to hold the
   //    context or stack slot index for the variable.
-  // 9. FunctionNameEntryIndex:
+  // 7. FunctionNameEntryIndex:
   //    If the scope belongs to a named function expression this part contains
   //    information about the function variable. It always occupies two array
   //    slots:  a. The name of the function variable.
@@ -4222,8 +4297,6 @@
   int ContextGlobalNameEntriesIndex();
   int ContextLocalInfoEntriesIndex();
   int ContextGlobalInfoEntriesIndex();
-  int StrongModeFreeVariableNameEntriesIndex();
-  int StrongModeFreeVariablePositionEntriesIndex();
   int ReceiverEntryIndex();
   int FunctionNameEntryIndex();
 
@@ -4286,7 +4359,7 @@
 
   DECLARE_CAST(NormalizedMapCache)
 
-  static inline bool IsNormalizedMapCache(const Object* obj);
+  static inline bool IsNormalizedMapCache(const HeapObject* obj);
 
   DECLARE_VERIFIER(NormalizedMapCache)
  private:
@@ -4377,26 +4450,46 @@
   inline int parameter_count() const;
   inline void set_parameter_count(int number_of_parameters);
 
+  // Accessors for profiling count.
+  inline int interrupt_budget() const;
+  inline void set_interrupt_budget(int interrupt_budget);
+
   // Accessors for the constant pool.
   DECL_ACCESSORS(constant_pool, FixedArray)
 
+  // Accessors for handler table containing offsets of exception handlers.
+  DECL_ACCESSORS(handler_table, FixedArray)
+
+  // Accessors for source position table containing mappings between byte code
+  // offset and source position.
+  DECL_ACCESSORS(source_position_table, FixedArray)
+
   DECLARE_CAST(BytecodeArray)
 
   // Dispatched behavior.
   inline int BytecodeArraySize();
 
+  inline int instruction_size();
+
+  int SourcePosition(int offset);
+  int SourceStatementPosition(int offset);
+
   DECLARE_PRINTER(BytecodeArray)
   DECLARE_VERIFIER(BytecodeArray)
 
   void Disassemble(std::ostream& os);
 
-  // Layout description.
-  static const int kFrameSizeOffset = FixedArrayBase::kHeaderSize;
-  static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
-  static const int kConstantPoolOffset = kParameterSizeOffset + kIntSize;
-  static const int kHeaderSize = kConstantPoolOffset + kPointerSize;
+  void CopyBytecodesTo(BytecodeArray* to);
 
-  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+  // Layout description.
+  static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
+  static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
+  static const int kSourcePositionTableOffset =
+      kHandlerTableOffset + kPointerSize;
+  static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
+  static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
+  static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
+  static const int kHeaderSize = kInterruptBudgetOffset + kIntSize;
 
   // Maximal memory consumption for a single BytecodeArray.
   static const int kMaxSize = 512 * MB;
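To make the new header layout concrete, a worked offset computation under common 64-bit assumptions (kPointerSize == 8, kIntSize == 4), writing H for FixedArrayBase::kHeaderSize:

    // Three pointer-sized fields, then three int-sized fields:
    //   kConstantPoolOffset        = H
    //   kHandlerTableOffset        = H + 8
    //   kSourcePositionTableOffset = H + 16
    //   kFrameSizeOffset           = H + 24
    //   kParameterSizeOffset       = H + 28
    //   kInterruptBudgetOffset     = H + 32
    //   kHeaderSize                = H + 36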
@@ -4509,7 +4602,7 @@
   DECLARE_CAST(FixedTypedArray<Traits>)
 
   inline ElementType get_scalar(int index);
-  static inline Handle<Object> get(Handle<FixedTypedArray> array, int index);
+  static inline Handle<Object> get(FixedTypedArray* array, int index);
   inline void set(int index, ElementType value);
 
   static inline ElementType from_int(int value);
@@ -4653,7 +4746,7 @@
 
   DECLARE_CAST(DeoptimizationOutputData)
 
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
   void DeoptimizationOutputDataPrint(std::ostream& os);  // NOLINT
 #endif
 };
@@ -4698,7 +4791,7 @@
 // 1) Based on ranges: Used for unoptimized code. Contains one entry per
 //    exception handler and a range representing the try-block covered by that
 //    handler. Layout looks as follows:
-//      [ range-start , range-end , handler-offset , stack-depth ]
+//      [ range-start , range-end , handler-offset , handler-data ]
 // 2) Based on return addresses: Used for turbofanned code. Contains one entry
 //    per call-site that could throw an exception. Layout looks as follows:
 //      [ return-address-offset , handler-offset ]
@@ -4709,29 +4802,41 @@
   // undecidable it is merely an approximation (e.g. useful for debugger).
   enum CatchPrediction { UNCAUGHT, CAUGHT };
 
-  // Accessors for handler table based on ranges.
+  // Getters for handler table based on ranges.
+  inline int GetRangeStart(int index) const;
+  inline int GetRangeEnd(int index) const;
+  inline int GetRangeHandler(int index) const;
+  inline int GetRangeData(int index) const;
+
+  // Setters for handler table based on ranges.
   inline void SetRangeStart(int index, int value);
   inline void SetRangeEnd(int index, int value);
   inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
-  inline void SetRangeDepth(int index, int value);
+  inline void SetRangeData(int index, int value);
 
-  // Accessors for handler table based on return addresses.
+  // Setters for handler table based on return addresses.
   inline void SetReturnOffset(int index, int value);
   inline void SetReturnHandler(int index, int offset, CatchPrediction pred);
 
   // Lookup handler in a table based on ranges.
-  int LookupRange(int pc_offset, int* stack_depth, CatchPrediction* prediction);
+  int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
 
   // Lookup handler in a table based on return addresses.
   int LookupReturn(int pc_offset, CatchPrediction* prediction);
 
+  // Returns the conservative catch prediction.
+  inline CatchPrediction GetRangePrediction(int index) const;
+
+  // Returns the number of entries in the table.
+  inline int NumberOfRangeEntries() const;
+
   // Returns the required length of the underlying fixed array.
   static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
   static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
 
   DECLARE_CAST(HandlerTable)
 
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
   void HandlerTableRangePrint(std::ostream& os);   // NOLINT
   void HandlerTableReturnPrint(std::ostream& os);  // NOLINT
 #endif
@@ -4741,7 +4846,7 @@
   static const int kRangeStartIndex = 0;
   static const int kRangeEndIndex = 1;
   static const int kRangeHandlerIndex = 2;
-  static const int kRangeDepthIndex = 3;
+  static const int kRangeDataIndex = 3;
   static const int kRangeEntrySize = 4;
 
   // Layout description for handler table based on return addresses.
@@ -4897,7 +5002,6 @@
   inline bool is_to_boolean_ic_stub();
   inline bool is_keyed_stub();
   inline bool is_optimized_code();
-  inline bool is_interpreter_entry_trampoline();
   inline bool embeds_maps_weakly();
 
   inline bool IsCodeStubOrIC();
@@ -4906,6 +5010,10 @@
   inline void set_raw_kind_specific_flags1(int value);
   inline void set_raw_kind_specific_flags2(int value);
 
+  // Testers for interpreter builtins.
+  inline bool is_interpreter_entry_trampoline();
+  inline bool is_interpreter_enter_bytecode_dispatch();
+
   // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
   // object was generated by either the hydrogen or the TurboFan optimizing
   // compiler (but it may not be an optimized function).
@@ -5096,8 +5204,8 @@
   inline int ExecutableSize();
 
   // Locating source position.
-  int SourcePosition(Address pc);
-  int SourceStatementPosition(Address pc);
+  int SourcePosition(int code_offset);
+  int SourceStatementPosition(int code_offset);
 
   DECLARE_CAST(Code)
 
@@ -5294,6 +5402,16 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
 };
 
+class AbstractCode : public HeapObject {
+ public:
+  int SourcePosition(int offset);
+  int SourceStatementPosition(int offset);
+
+  DECLARE_CAST(AbstractCode)
+  inline int Size();
+  inline Code* GetCode();
+  inline BytecodeArray* GetBytecodeArray();
+};
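A hedged sketch of what the AbstractCode wrapper buys callers: one source-position query that works for both tiers (the abstract_code pointer and offset are assumptions):

    // Works whether the function is backed by machine code or bytecode.
    int source_pos = abstract_code->SourcePosition(code_offset);
    // Tier-specific access remains available when needed:
    //   abstract_code->GetCode();
    //   abstract_code->GetBytecodeArray();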
 
 // Dependent code is a singly linked list of fixed arrays. Each array contains
 // code objects in weak cells for one dependent group. The suffix of the array
@@ -5475,7 +5593,7 @@
   STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
   class DictionaryMap : public BitField<bool, 20, 1> {};
   class OwnsDescriptors : public BitField<bool, 21, 1> {};
-  class IsHiddenPrototype : public BitField<bool, 22, 1> {};
+  class HasHiddenPrototype : public BitField<bool, 22, 1> {};
   class Deprecated : public BitField<bool, 23, 1> {};
   class IsUnstable : public BitField<bool, 24, 1> {};
   class IsMigrationTarget : public BitField<bool, 25, 1> {};
@@ -5487,7 +5605,11 @@
   // Builtins::kJSConstructStubGeneric stub.
   // This counter is used for in-object slack tracking.
   // The in-object slack tracking is considered enabled when the counter is
-  // non zero.
+  // non-zero. The counter only has a valid count for initial maps. For
+  // transitioned maps only kNoSlackTracking has a meaning, namely that
+  // in-object slack tracking already finished for the transition tree. Any
+  // other value indicates that either in-object slack tracking is still in
+  // progress, or that the map isn't part of the transition tree anymore.
   class ConstructionCounter : public BitField<int, 29, 3> {};
   static const int kSlackTrackingCounterStart = 7;
   static const int kSlackTrackingCounterEnd = 1;
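The arithmetic behind the counter comment above is pinned down by the BitField declaration; kNoSlackTracking itself is referenced but not declared in this hunk, so its value (the one remaining encodable state, 0) is an inference:

    // BitField<int, 29, 3>: bits 29..31, so the counter ranges over 0..7.
    //   7 (kSlackTrackingCounterStart) .. 1 (kSlackTrackingCounterEnd):
    //       in-object slack tracking in progress, or the map has left
    //       the transition tree.
    //   0 (presumably kNoSlackTracking):
    //       tracking already finished for the transition tree.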
@@ -5551,13 +5673,12 @@
 
   // Tells whether the instance has a [[Construct]] internal method.
   // This property is implemented according to ES6, section 7.2.4.
-  inline void set_is_constructor();
+  inline void set_is_constructor(bool value);
   inline bool is_constructor() const;
 
-  // Tells whether the instance with this map should be ignored by the
-  // Object.getPrototypeOf() function and the __proto__ accessor.
-  inline void set_is_hidden_prototype();
-  inline bool is_hidden_prototype() const;
+  // Tells whether the instance with this map has a hidden prototype.
+  inline void set_has_hidden_prototype(bool value);
+  inline bool has_hidden_prototype() const;
 
   // Records and queries whether the instance has a named interceptor.
   inline void set_has_named_interceptor();
@@ -5606,6 +5727,7 @@
   inline bool has_fast_double_elements();
   inline bool has_fast_elements();
   inline bool has_sloppy_arguments_elements();
+  inline bool has_fast_string_wrapper_elements();
   inline bool has_fixed_typed_array_elements();
   inline bool has_dictionary_elements();
 
@@ -5656,17 +5778,17 @@
                               int* old_number_of_fields);
   // TODO(ishell): moveit!
   static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
-  MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType(
-      Representation rep1, Handle<HeapType> type1, Representation rep2,
-      Handle<HeapType> type2, Isolate* isolate);
+  MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
+      Representation rep1, Handle<FieldType> type1, Representation rep2,
+      Handle<FieldType> type2, Isolate* isolate);
   static void GeneralizeFieldType(Handle<Map> map, int modify_index,
                                   Representation new_representation,
-                                  Handle<HeapType> new_field_type);
+                                  Handle<FieldType> new_field_type);
   static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
                                          PropertyKind new_kind,
                                          PropertyAttributes new_attributes,
                                          Representation new_representation,
-                                         Handle<HeapType> new_field_type,
+                                         Handle<FieldType> new_field_type,
                                          StoreMode store_mode);
   static Handle<Map> CopyGeneralizeAllRepresentations(
       Handle<Map> map, int modify_index, StoreMode store_mode,
@@ -5752,6 +5874,10 @@
 
   inline Cell* RetrieveDescriptorsPointer();
 
+  // Checks whether all properties are stored either in the map or on the object
+  // (inobject, properties, or elements backing store), requiring no special
+  // checks.
+  bool OnlyHasSimpleProperties();
   inline int EnumLength();
   inline void SetEnumLength(int length);
 
@@ -5788,11 +5914,8 @@
                                           TransitionFlag flag);
 
   MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
-      Handle<Map> map,
-      Handle<Name> name,
-      Handle<HeapType> type,
-      PropertyAttributes attributes,
-      Representation representation,
+      Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
+      PropertyAttributes attributes, Representation representation,
       TransitionFlag flag);
 
   MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
@@ -6126,16 +6249,14 @@
 
   void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
                             PropertyAttributes attributes);
-  void PrintGeneralization(FILE* file,
-                           const char* reason,
-                           int modify_index,
-                           int split,
-                           int descriptors,
-                           bool constant_to_field,
+  void PrintGeneralization(FILE* file, const char* reason, int modify_index,
+                           int split, int descriptors, bool constant_to_field,
                            Representation old_representation,
                            Representation new_representation,
-                           HeapType* old_field_type,
-                           HeapType* new_field_type);
+                           MaybeHandle<FieldType> old_field_type,
+                           MaybeHandle<Object> old_value,
+                           MaybeHandle<FieldType> new_field_type,
+                           MaybeHandle<Object> new_value);
 
   static const int kFastPropertiesSoftLimit = 12;
   static const int kMaxFastProperties = 128;
@@ -6287,7 +6408,7 @@
   // [line_ends]: FixedArray of line ends positions.
   DECL_ACCESSORS(line_ends, Object)
 
-  // [eval_from_shared]: for eval scripts the shared funcion info for the
+  // [eval_from_shared]: for eval scripts the shared function info for the
   // function from which eval was called.
   DECL_ACCESSORS(eval_from_shared, Object)
 
@@ -6335,17 +6456,17 @@
   // resource is accessible. Otherwise, always return true.
   inline bool HasValidSource();
 
-  // Convert code position into column number.
-  static int GetColumnNumber(Handle<Script> script, int code_pos);
+  // Convert code offset into column number.
+  static int GetColumnNumber(Handle<Script> script, int code_offset);
 
-  // Convert code position into (zero-based) line number.
+  // Convert code offset into (zero-based) line number.
   // The non-handlified version does not allocate, but may be much slower.
-  static int GetLineNumber(Handle<Script> script, int code_pos);
+  static int GetLineNumber(Handle<Script> script, int code_offset);
   int GetLineNumber(int code_pos);
 
   static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
 
-  // Init line_ends array with code positions of line ends inside script source.
+  // Init line_ends array with source code positions of line ends.
   static void InitLineEnds(Handle<Script> script);
 
   // Get the JS object wrapping the given script; create it if none exists.
@@ -6414,37 +6535,40 @@
 //
 // Installation of ids for the selected builtin functions is handled
 // by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V)                   \
-  V(Array.prototype, indexOf, ArrayIndexOf)         \
-  V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
-  V(Array.prototype, push, ArrayPush)               \
-  V(Array.prototype, pop, ArrayPop)                 \
-  V(Array.prototype, shift, ArrayShift)             \
-  V(Function.prototype, apply, FunctionApply)       \
-  V(Function.prototype, call, FunctionCall)         \
-  V(String.prototype, charCodeAt, StringCharCodeAt) \
-  V(String.prototype, charAt, StringCharAt)         \
-  V(String, fromCharCode, StringFromCharCode)       \
-  V(Math, random, MathRandom)                       \
-  V(Math, floor, MathFloor)                         \
-  V(Math, round, MathRound)                         \
-  V(Math, ceil, MathCeil)                           \
-  V(Math, abs, MathAbs)                             \
-  V(Math, log, MathLog)                             \
-  V(Math, exp, MathExp)                             \
-  V(Math, sqrt, MathSqrt)                           \
-  V(Math, pow, MathPow)                             \
-  V(Math, max, MathMax)                             \
-  V(Math, min, MathMin)                             \
-  V(Math, cos, MathCos)                             \
-  V(Math, sin, MathSin)                             \
-  V(Math, tan, MathTan)                             \
-  V(Math, acos, MathAcos)                           \
-  V(Math, asin, MathAsin)                           \
-  V(Math, atan, MathAtan)                           \
-  V(Math, atan2, MathAtan2)                         \
-  V(Math, imul, MathImul)                           \
-  V(Math, clz32, MathClz32)                         \
+#define FUNCTIONS_WITH_ID_LIST(V)                     \
+  V(Array.prototype, indexOf, ArrayIndexOf)           \
+  V(Array.prototype, lastIndexOf, ArrayLastIndexOf)   \
+  V(Array.prototype, push, ArrayPush)                 \
+  V(Array.prototype, pop, ArrayPop)                   \
+  V(Array.prototype, shift, ArrayShift)               \
+  V(Function.prototype, apply, FunctionApply)         \
+  V(Function.prototype, call, FunctionCall)           \
+  V(String.prototype, charCodeAt, StringCharCodeAt)   \
+  V(String.prototype, charAt, StringCharAt)           \
+  V(String.prototype, concat, StringConcat)           \
+  V(String.prototype, toLowerCase, StringToLowerCase) \
+  V(String.prototype, toUpperCase, StringToUpperCase) \
+  V(String, fromCharCode, StringFromCharCode)         \
+  V(Math, random, MathRandom)                         \
+  V(Math, floor, MathFloor)                           \
+  V(Math, round, MathRound)                           \
+  V(Math, ceil, MathCeil)                             \
+  V(Math, abs, MathAbs)                               \
+  V(Math, log, MathLog)                               \
+  V(Math, exp, MathExp)                               \
+  V(Math, sqrt, MathSqrt)                             \
+  V(Math, pow, MathPow)                               \
+  V(Math, max, MathMax)                               \
+  V(Math, min, MathMin)                               \
+  V(Math, cos, MathCos)                               \
+  V(Math, sin, MathSin)                               \
+  V(Math, tan, MathTan)                               \
+  V(Math, acos, MathAcos)                             \
+  V(Math, asin, MathAsin)                             \
+  V(Math, atan, MathAtan)                             \
+  V(Math, atan2, MathAtan2)                           \
+  V(Math, imul, MathImul)                             \
+  V(Math, clz32, MathClz32)                           \
   V(Math, fround, MathFround)
 
 #define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
@@ -6481,6 +6605,7 @@
 
   // [code]: Function code.
   DECL_ACCESSORS(code, Code)
+
   inline void ReplaceCode(Code* code);
 
   // [optimized_code_map]: Map from native context to optimized code
@@ -6657,8 +6782,8 @@
   inline int end_position() const;
   inline void set_end_position(int end_position);
 
-  // Is this function a function expression in the source code.
-  DECL_BOOLEAN_ACCESSORS(is_expression)
+  // Is this function a named function expression in the source code.
+  DECL_BOOLEAN_ACCESSORS(is_named_expression)
 
   // Is this function a top-level function (scripts, evals).
   DECL_BOOLEAN_ACCESSORS(is_toplevel)
@@ -6726,9 +6851,10 @@
   // see a binding for it.
   DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
 
-  // Indicates that the function is anonymous (the name field can be set
-  // through the API, which does not change this flag).
-  DECL_BOOLEAN_ACCESSORS(is_anonymous)
+  // Indicates that the function is either an anonymous expression
+  // or an arrow function (the name field can be set through the API,
+  // which does not change this flag).
+  DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
 
   // Is this a function or top-level/eval code.
   DECL_BOOLEAN_ACCESSORS(is_function)
@@ -6748,8 +6874,11 @@
   // Indicates that this function is a concise method.
   DECL_BOOLEAN_ACCESSORS(is_concise_method)
 
-  // Indicates that this function is an accessor (getter or setter).
-  DECL_BOOLEAN_ACCESSORS(is_accessor_function)
+  // Indicates that this function is a getter.
+  DECL_BOOLEAN_ACCESSORS(is_getter_function)
+
+  // Indicates that this function is a setter.
+  DECL_BOOLEAN_ACCESSORS(is_setter_function)
 
   // Indicates that this function is a default constructor.
   DECL_BOOLEAN_ACCESSORS(is_default_constructor)
@@ -6763,6 +6892,9 @@
   // Indicates that the shared function info has never been compiled before.
   DECL_BOOLEAN_ACCESSORS(never_compiled)
 
+  // Whether this function was created from a FunctionDeclaration.
+  DECL_BOOLEAN_ACCESSORS(is_declaration)
+
   inline FunctionKind kind();
   inline void set_kind(FunctionKind kind);
 
@@ -6996,10 +7128,10 @@
   // Bit positions in start_position_and_type.
   // The source code start position is in the 30 most significant bits of
   // the start_position_and_type field.
-  static const int kIsExpressionBit    = 0;
-  static const int kIsTopLevelBit      = 1;
+  static const int kIsNamedExpressionBit = 0;
+  static const int kIsTopLevelBit = 1;
   static const int kStartPositionShift = 2;
-  static const int kStartPositionMask  = ~((1 << kStartPositionShift) - 1);
+  static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
 
   // Bit positions in compiler_hints.
   enum CompilerHints {
@@ -7016,7 +7148,7 @@
     kHasDuplicateParameters,
     kForceInline,
     kIsAsmFunction,
-    kIsAnonymous,
+    kIsAnonymousExpression,
     kNameShouldPrintAsAnonymous,
     kIsFunction,
     kDontCrankshaft,
@@ -7026,14 +7158,15 @@
     kIsArrow = kFunctionKind,
     kIsGenerator,
     kIsConciseMethod,
-    kIsAccessorFunction,
     kIsDefaultConstructor,
     kIsSubclassConstructor,
     kIsBaseConstructor,
-    kIsInObjectLiteral,
+    kIsGetterFunction,
+    kIsSetterFunction,
     // byte 3
     kDeserialized,
     kNeverCompiled,
+    kIsDeclaration,
     kCompilerHintsCount,  // Pseudo entry
   };
   // Add hints for other modes when they're added.
@@ -7047,11 +7180,11 @@
   ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
   ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
   ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
-  ASSERT_FUNCTION_KIND_ORDER(kAccessorFunction, kIsAccessorFunction);
   ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
   ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
   ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
-  ASSERT_FUNCTION_KIND_ORDER(kInObjectLiteral, kIsInObjectLiteral);
+  ASSERT_FUNCTION_KIND_ORDER(kGetterFunction, kIsGetterFunction);
+  ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
 #undef ASSERT_FUNCTION_KIND_ORDER
 
   class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
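A short decoding sketch tying the hint bits together; compiler_hints stands for the packed hints word these accessors read, which is an assumption here:

    // FunctionKindBits spans 8 bits starting at kIsArrow; the
    // ASSERT_FUNCTION_KIND_ORDER checks above keep the bit layout in sync
    // with the FunctionKind enum, so decoding is a plain BitField read.
    FunctionKind kind = FunctionKindBits::decode(compiler_hints);
    // The start position lives in the top 30 bits of
    // start_position_and_type (kStartPositionShift == 2):
    int start_position = start_position_and_type >> kStartPositionShift;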
@@ -7159,6 +7292,9 @@
   // [receiver]: The receiver of the suspended computation.
   DECL_ACCESSORS(receiver, Object)
 
+  // [input]: The most recent input value.
+  DECL_ACCESSORS(input, Object)
+
   // [continuation]: Offset into code of continuation.
   //
   // A positive offset indicates a suspended generator.  The special
@@ -7187,12 +7323,13 @@
   static const int kFunctionOffset = JSObject::kHeaderSize;
   static const int kContextOffset = kFunctionOffset + kPointerSize;
   static const int kReceiverOffset = kContextOffset + kPointerSize;
-  static const int kContinuationOffset = kReceiverOffset + kPointerSize;
+  static const int kInputOffset = kReceiverOffset + kPointerSize;
+  static const int kContinuationOffset = kInputOffset + kPointerSize;
   static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
   static const int kSize = kOperandStackOffset + kPointerSize;
 
   // Resume mode, for use by runtime functions.
-  enum ResumeMode { NEXT, THROW };
+  enum ResumeMode { NEXT, RETURN, THROW };
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
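
A minimal standalone sketch (toy header size assumed; not V8 code) of the
chained-offset idiom in the hunk above: because each field offset is defined
in terms of the previous one, threading the new kInputOffset between
kReceiverOffset and kContinuationOffset automatically shifts every later
offset, and kSize, by one pointer width.

#include <cassert>

namespace {

const int kPointerSize = sizeof(void*);
const int kHeaderSize = 2 * kPointerSize;  // assumed map + properties header

const int kFunctionOffset = kHeaderSize;
const int kContextOffset = kFunctionOffset + kPointerSize;
const int kReceiverOffset = kContextOffset + kPointerSize;
const int kInputOffset = kReceiverOffset + kPointerSize;      // newly inserted slot
const int kContinuationOffset = kInputOffset + kPointerSize;  // shifted by the insertion
const int kOperandStackOffset = kContinuationOffset + kPointerSize;
const int kSize = kOperandStackOffset + kPointerSize;

}  // namespace

int main() {
  // Six pointer-sized fields after the header: the new slot grows kSize by
  // exactly one kPointerSize.
  assert(kSize == kHeaderSize + 6 * kPointerSize);
  return 0;
}
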
@@ -7244,12 +7381,6 @@
   // arguments to any call to the wrapped function.
   DECL_ACCESSORS(bound_arguments, FixedArray)
 
-  // [creation_context]: The native context in which the function was bound.
-  // TODO(bmeurer, verwaest): Can we (mis)use (unused) constructor field in
-  // the Map instead of putting this into the object? Only required for
-  // JSReceiver::GetCreationContext() anyway.
-  DECL_ACCESSORS(creation_context, Context)
-
   static MaybeHandle<Context> GetFunctionRealm(
       Handle<JSBoundFunction> function);
 
@@ -7267,9 +7398,7 @@
   static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
   static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
   static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
-  static const int kCreationContextOffset =
-      kBoundArgumentsOffset + kPointerSize;
-  static const int kLengthOffset = kCreationContextOffset + kPointerSize;
+  static const int kLengthOffset = kBoundArgumentsOffset + kPointerSize;
   static const int kNameOffset = kLengthOffset + kPointerSize;
   static const int kSize = kNameOffset + kPointerSize;
 
@@ -7431,6 +7560,12 @@
   // debug name.
   static Handle<String> GetName(Handle<JSFunction> function);
 
+  // ES6 section 9.2.11 SetFunctionName
+  // Because of the way this abstract operation is used in the spec,
+  // it should never fail.
+  static void SetName(Handle<JSFunction> function, Handle<Name> name,
+                      Handle<String> prefix);
+
   // The function's displayName if it is set, otherwise name if it is
   // configured, otherwise shared function info
   // debug name.
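
SetName above cites ES6 section 9.2.11 (SetFunctionName); the rule its prefix
parameter supports is that a non-empty prefix such as "get", "set" or "bound"
is joined to the name with a single space. A hedged sketch of just that string
rule in plain C++ (SetFunctionNameSketch is illustrative, not the V8 factory
code):

#include <iostream>
#include <string>

std::string SetFunctionNameSketch(const std::string& name,
                                  const std::string& prefix) {
  // ES6 9.2.11: if a prefix was passed, name becomes prefix + " " + name.
  return prefix.empty() ? name : prefix + " " + name;
}

int main() {
  std::cout << SetFunctionNameSketch("x", "get") << "\n";  // prints "get x"
  std::cout << SetFunctionNameSketch("f", "") << "\n";     // prints "f"
  return 0;
}
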
@@ -8233,9 +8368,8 @@
   static const int kPointerFieldsEndOffset = kWeakNextOffset;
 
   // For other visitors, use the fixed body descriptor below.
-  typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
-                              kDependentCodeOffset + kPointerSize,
-                              kSize> BodyDescriptor;
+  typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
+      BodyDescriptor;
 
  private:
   inline bool PretenuringDecisionMade();
@@ -8253,6 +8387,7 @@
 
   inline bool IsValid();
   inline AllocationSite* GetAllocationSite();
+  inline Address GetAllocationSiteUnchecked();
 
   DECLARE_PRINTER(AllocationMemento)
   DECLARE_VERIFIER(AllocationMemento)
@@ -8440,10 +8575,7 @@
   // If the name is private, it can only name own properties.
   inline bool IsPrivate();
 
-  // If the name is a non-flat string, this method returns a flat version of the
-  // string. Otherwise it'll just return the input.
-  static inline Handle<Name> Flatten(Handle<Name> name,
-                                     PretenureFlag pretenure = NOT_TENURED);
+  inline bool IsUniqueName() const;
 
   // Return a string version of this name that is converted according to the
   // rules described in ES6 section 9.2.11.
@@ -8836,9 +8968,7 @@
   static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
   static const int kMaxUtf16CodeUnit = 0xffff;
   static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit;
-
-  // Value of hash field containing computed hash equal to zero.
-  static const int kEmptyStringHash = kIsNotArrayIndexMask;
+  static const uc32 kMaxCodePoint = 0x10ffff;
 
   // Maximal string length.
   static const int kMaxLength = (1 << 28) - 16;
@@ -9162,9 +9292,6 @@
   static const int kResourceDataOffset = kResourceOffset + kPointerSize;
   static const int kSize = kResourceDataOffset + kPointerSize;
 
-  static const int kMaxShortLength =
-      (kShortSize - SeqString::kHeaderSize) / kCharSize;
-
   // Return whether external string is short (data pointer is not cached).
   inline bool is_short();
 
@@ -9411,7 +9538,7 @@
   static const byte kNotBooleanMask = ~1;
   static const byte kTheHole = 2;
   static const byte kNull = 3;
-  static const byte kArgumentMarker = 4;
+  static const byte kArgumentsMarker = 4;
   static const byte kUndefined = 5;
   static const byte kUninitialized = 6;
   static const byte kOther = 7;
@@ -9604,7 +9731,7 @@
   // ES6 9.5.8
   MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
       Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
-      Handle<Object> receiver, LanguageMode language_mode);
+      Handle<Object> receiver);
 
   // ES6 9.5.9
   MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
@@ -9617,12 +9744,6 @@
   MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
       Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
 
-  // ES6 9.5.11
-  MUST_USE_RESULT static Maybe<bool> Enumerate(Isolate* isolate,
-                                               Handle<JSReceiver> receiver,
-                                               Handle<JSProxy> proxy,
-                                               KeyAccumulator* accumulator);
-
   // ES6 9.5.12
   MUST_USE_RESULT static Maybe<bool> OwnPropertyKeys(
       Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
@@ -9648,12 +9769,12 @@
 
   static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
 
- private:
-  static Maybe<bool> AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+  static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
                                         Handle<Symbol> private_name,
                                         PropertyDescriptor* desc,
                                         ShouldThrow should_throw);
 
+ private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
 };
 
@@ -9811,40 +9932,6 @@
 };
 
 
-// ES6 section 25.1.1.3 The IteratorResult Interface
-class JSIteratorResult final : public JSObject {
- public:
-  // [done]: This is the result status of an iterator next method call.  If the
-  // end of the iterator was reached done is true.  If the end was not reached
-  // done is false and a [value] is available.
-  DECL_ACCESSORS(done, Object)
-
-  // [value]: If [done] is false, this is the current iteration element value.
-  // If [done] is true, this is the return value of the iterator, if it supplied
-  // one.  If the iterator does not have a return value, value is undefined.
-  // In that case, the value property may be absent from the conforming object
-  // if it does not inherit an explicit value property.
-  DECL_ACCESSORS(value, Object)
-
-  // Dispatched behavior.
-  DECLARE_PRINTER(JSIteratorResult)
-  DECLARE_VERIFIER(JSIteratorResult)
-
-  DECLARE_CAST(JSIteratorResult)
-
-  static const int kValueOffset = JSObject::kHeaderSize;
-  static const int kDoneOffset = kValueOffset + kPointerSize;
-  static const int kSize = kDoneOffset + kPointerSize;
-
-  // Indices of in-object properties.
-  static const int kValueIndex = 0;
-  static const int kDoneIndex = 1;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
-};
-
-
 // Base class for both JSWeakMap and JSWeakSet
 class JSWeakCollection: public JSObject {
  public:
@@ -10195,11 +10282,26 @@
 };
 
 
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the derived object when the property is set.
+// This shadows the accessor in the prototype.
 class AccessorInfo: public Struct {
  public:
   DECL_ACCESSORS(name, Object)
   DECL_INT_ACCESSORS(flag)
   DECL_ACCESSORS(expected_receiver_type, Object)
+  DECL_ACCESSORS(getter, Object)
+  DECL_ACCESSORS(setter, Object)
+  DECL_ACCESSORS(data, Object)
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(AccessorInfo)
 
   inline bool all_can_read();
   inline void set_all_can_read(bool value);
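
The comment block moved onto AccessorInfo describes an observable rule: a
store that resolves to an accessor with no setter is silently dropped. A toy
model of that rule (illustrative only; std::optional stands in for an absent
setter, and int for a property value):

#include <cassert>
#include <functional>
#include <optional>

struct AccessorSketch {
  std::function<int()> getter;                     // an accessor always has a getter
  std::optional<std::function<void(int)>> setter;  // but may lack a setter
};

// Returns true if the store took effect.
bool StoreThroughAccessor(const AccessorSketch& accessor, int value) {
  if (!accessor.setter) return false;  // no setter: the request is ignored
  (*accessor.setter)(value);
  return true;
}

int main() {
  int backing = 1;
  AccessorSketch getter_only{[&backing] { return backing; }, std::nullopt};
  assert(!StoreThroughAccessor(getter_only, 42));  // store silently ignored
  assert(backing == 1);
  return 0;
}
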
@@ -10233,7 +10335,11 @@
   static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kFlagOffset = kNameOffset + kPointerSize;
   static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
-  static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
+  static const int kGetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
+  static const int kSetterOffset = kGetterOffset + kPointerSize;
+  static const int kDataOffset = kSetterOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
+
 
  private:
   inline bool HasExpectedReceiverType();
@@ -10248,39 +10354,6 @@
 };
 
 
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the derived object when the property is set.
-// This shadows the accessor in the prototype.
-class ExecutableAccessorInfo: public AccessorInfo {
- public:
-  DECL_ACCESSORS(getter, Object)
-  DECL_ACCESSORS(setter, Object)
-  DECL_ACCESSORS(data, Object)
-
-  DECLARE_CAST(ExecutableAccessorInfo)
-
-  // Dispatched behavior.
-  DECLARE_PRINTER(ExecutableAccessorInfo)
-  DECLARE_VERIFIER(ExecutableAccessorInfo)
-
-  static const int kGetterOffset = AccessorInfo::kSize;
-  static const int kSetterOffset = kGetterOffset + kPointerSize;
-  static const int kDataOffset = kSetterOffset + kPointerSize;
-  static const int kSize = kDataOffset + kPointerSize;
-
-  static void ClearSetter(Handle<ExecutableAccessorInfo> info);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
-};
-
-
 // Support for JavaScript accessors: A pair of a getter and a setter. Each
 // accessor can either be
 //   * a pointer to a JavaScript function or proxy: a real accessor
@@ -10300,7 +10373,8 @@
   inline void set(AccessorComponent component, Object* value);
 
   // Note: Returns undefined if the component is a hole.
-  Object* GetComponent(AccessorComponent component);
+  static Handle<Object> GetComponent(Handle<AccessorPair> accessor_pair,
+                                     AccessorComponent component);
 
   // Set both components, skipping arguments which are a JavaScript null.
   inline void SetComponents(Object* getter, Object* setter);
@@ -10418,15 +10492,16 @@
 class TemplateInfo: public Struct {
  public:
   DECL_ACCESSORS(tag, Object)
-  inline int number_of_properties() const;
-  inline void set_number_of_properties(int value);
+  DECL_ACCESSORS(serial_number, Object)
+  DECL_INT_ACCESSORS(number_of_properties)
   DECL_ACCESSORS(property_list, Object)
   DECL_ACCESSORS(property_accessors, Object)
 
   DECLARE_VERIFIER(TemplateInfo)
 
   static const int kTagOffset = HeapObject::kHeaderSize;
-  static const int kNumberOfProperties = kTagOffset + kPointerSize;
+  static const int kSerialNumberOffset = kTagOffset + kPointerSize;
+  static const int kNumberOfProperties = kSerialNumberOffset + kPointerSize;
   static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
   static const int kPropertyAccessorsOffset =
       kPropertyListOffset + kPointerSize;
@@ -10441,7 +10516,6 @@
 
 class FunctionTemplateInfo: public TemplateInfo {
  public:
-  DECL_ACCESSORS(serial_number, Object)
   DECL_ACCESSORS(call_code, Object)
   DECL_ACCESSORS(prototype_template, Object)
   DECL_ACCESSORS(parent_template, Object)
@@ -10475,8 +10549,7 @@
   DECLARE_PRINTER(FunctionTemplateInfo)
   DECLARE_VERIFIER(FunctionTemplateInfo)
 
-  static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
-  static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
+  static const int kCallCodeOffset = TemplateInfo::kHeaderSize;
   static const int kPrototypeTemplateOffset =
       kCallCodeOffset + kPointerSize;
   static const int kParentTemplateOffset =
@@ -10545,30 +10618,33 @@
   DECL_ACCESSORS(shared, SharedFunctionInfo)
   // Code object for the patched code. This code object is the code object
   // currently active for the function.
-  DECL_ACCESSORS(code, Code)
+  DECL_ACCESSORS(abstract_code, AbstractCode)
   // Fixed array holding status information for each active break point.
   DECL_ACCESSORS(break_points, FixedArray)
 
-  // Check if there is a break point at a code position.
-  bool HasBreakPoint(int code_position);
-  // Get the break point info object for a code position.
-  Object* GetBreakPointInfo(int code_position);
+  // Check if there is a break point at a code offset.
+  bool HasBreakPoint(int code_offset);
+  // Get the break point info object for a code offset.
+  Object* GetBreakPointInfo(int code_offset);
   // Clear a break point.
-  static void ClearBreakPoint(Handle<DebugInfo> debug_info,
-                              int code_position,
+  static void ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
                               Handle<Object> break_point_object);
   // Set a break point.
-  static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
+  static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
                             int source_position, int statement_position,
                             Handle<Object> break_point_object);
-  // Get the break point objects for a code position.
-  Handle<Object> GetBreakPointObjects(int code_position);
+  // Get the break point objects for a code offset.
+  Handle<Object> GetBreakPointObjects(int code_offset);
   // Find the break point info holding this break point object.
   static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
                                            Handle<Object> break_point_object);
   // Get the number of break points for this function.
   int GetBreakPointCount();
 
+  static Smi* uninitialized() { return Smi::FromInt(0); }
+
+  inline BytecodeArray* original_bytecode_array();
+
   DECLARE_CAST(DebugInfo)
 
   // Dispatched behavior.
@@ -10576,8 +10652,8 @@
   DECLARE_VERIFIER(DebugInfo)
 
   static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
-  static const int kCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
-  static const int kBreakPointsStateIndex = kCodeIndex + kPointerSize;
+  static const int kAbstractCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
+  static const int kBreakPointsStateIndex = kAbstractCodeIndex + kPointerSize;
   static const int kSize = kBreakPointsStateIndex + kPointerSize;
 
   static const int kEstimatedNofBreakPointsInFunction = 16;
@@ -10585,8 +10661,8 @@
  private:
   static const int kNoBreakPointInfo = -1;
 
-  // Lookup the index in the break_points array for a code position.
-  int GetBreakPointInfoIndex(int code_position);
+  // Look up the index in the break_points array for a code offset.
+  int GetBreakPointInfoIndex(int code_offset);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
 };
@@ -10597,8 +10673,8 @@
 // position with one or more break points.
 class BreakPointInfo: public Struct {
  public:
-  // The position in the code for the break point.
-  DECL_INT_ACCESSORS(code_position)
+  // The code offset for the break point.
+  DECL_INT_ACCESSORS(code_offset)
   // The position in the source for the break point.
   DECL_INT_ACCESSORS(source_position)
   // The position in the source for the last statement before this break
@@ -10616,7 +10692,7 @@
   // Check if break point info has this break point object.
   static bool HasBreakPointObject(Handle<BreakPointInfo> info,
                                   Handle<Object> break_point_object);
-  // Get the number of break points for this code position.
+  // Get the number of break points for this code offset.
   int GetBreakPointCount();
 
   DECLARE_CAST(BreakPointInfo)
@@ -10625,8 +10701,8 @@
   DECLARE_PRINTER(BreakPointInfo)
   DECLARE_VERIFIER(BreakPointInfo)
 
-  static const int kCodePositionIndex = Struct::kHeaderSize;
-  static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
+  static const int kCodeOffsetIndex = Struct::kHeaderSize;
+  static const int kSourcePositionIndex = kCodeOffsetIndex + kPointerSize;
   static const int kStatementPositionIndex =
       kSourcePositionIndex + kPointerSize;
   static const int kBreakPointObjectsIndex =
@@ -10654,6 +10730,7 @@
   V(kDebug, "debug", "(Debugger)")                                         \
   V(kCompilationCache, "compilationcache", "(Compilation cache)")          \
   V(kHandleScope, "handlescope", "(Handle scope)")                         \
+  V(kDispatchTable, "dispatchtable", "(Dispatch table)")                   \
   V(kBuiltins, "builtins", "(Builtins)")                                   \
   V(kGlobalHandles, "globalhandles", "(Global handles)")                   \
   V(kEternalHandles, "eternalhandles", "(Eternal handles)")                \
diff --git a/src/optimizing-compile-dispatcher.cc b/src/optimizing-compile-dispatcher.cc
index 7062db6..4836b9b 100644
--- a/src/optimizing-compile-dispatcher.cc
+++ b/src/optimizing-compile-dispatcher.cc
@@ -7,6 +7,7 @@
 #include "src/base/atomicops.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/isolate.h"
+#include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
 namespace v8 {
@@ -59,6 +60,7 @@
         isolate_->optimizing_compile_dispatcher();
     {
       TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+      TRACE_EVENT0("v8", "V8.RecompileConcurrent");
 
       if (dispatcher->recompilation_delay_ != 0) {
         base::OS::Sleep(base::TimeDelta::FromMilliseconds(
@@ -244,9 +246,9 @@
         }
         DisposeOptimizedCompileJob(job, false);
       } else {
-        Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+        MaybeHandle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
         function->ReplaceCode(code.is_null() ? function->shared()->code()
-                                             : *code);
+                                             : *code.ToHandleChecked());
       }
     }
   }
diff --git a/src/ostreams.cc b/src/ostreams.cc
index a7a67f5..120db25 100644
--- a/src/ostreams.cc
+++ b/src/ostreams.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/ostreams.h"
+#include "src/objects.h"
 
 #if V8_OS_WIN
 #if _MSC_VER < 1900
@@ -60,6 +61,16 @@
   return os << buf;
 }
 
+
+std::ostream& PrintUC32(std::ostream& os, int32_t c, bool (*pred)(uint16_t)) {
+  if (c <= String::kMaxUtf16CodeUnit) {
+    return PrintUC16(os, static_cast<uint16_t>(c), pred);
+  }
+  char buf[13];
+  snprintf(buf, sizeof(buf), "\\u{%06x}", c);
+  return os << buf;
+}
+
 }  // namespace
 
 
@@ -81,5 +92,10 @@
   return PrintUC16(os, c.value, IsPrint);
 }
 
+
+std::ostream& operator<<(std::ostream& os, const AsUC32& c) {
+  return PrintUC32(os, c.value, IsPrint);
+}
+
 }  // namespace internal
 }  // namespace v8
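
A standalone sketch (assumed, simplified behavior; not the V8 sources) of the
escaping PrintUC32 performs: BMP code units defer to the UC16 path, while
anything above U+FFFF is printed brace-escaped with six hex digits.

#include <cstdint>
#include <cstdio>
#include <iostream>

std::ostream& PrintUC32Sketch(std::ostream& os, int32_t c) {
  if (c <= 0xffff) {
    // V8 defers to PrintUC16 here; this sketch just escapes the code unit.
    char buf[7];
    snprintf(buf, sizeof(buf), "\\u%04x", c);
    return os << buf;
  }
  char buf[13];
  snprintf(buf, sizeof(buf), "\\u{%06x}", c);
  return os << buf;
}

int main() {
  PrintUC32Sketch(std::cout, 0x1f600) << "\n";  // prints \u{01f600}
  PrintUC32Sketch(std::cout, 0x0041) << "\n";   // prints \u0041
  return 0;
}
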
diff --git a/src/ostreams.h b/src/ostreams.h
index 56f4aa7..1c2f38a 100644
--- a/src/ostreams.h
+++ b/src/ostreams.h
@@ -50,6 +50,12 @@
 };
 
 
+struct AsUC32 {
+  explicit AsUC32(int32_t v) : value(v) {}
+  int32_t value;
+};
+
+
 struct AsReversiblyEscapedUC16 {
   explicit AsReversiblyEscapedUC16(uint16_t v) : value(v) {}
   uint16_t value;
@@ -73,6 +79,10 @@
 // of printable ASCII range.
 std::ostream& operator<<(std::ostream& os, const AsUC16& c);
 
+// Writes the given code point to the output escaping everything outside
+// of printable ASCII range.
+std::ostream& operator<<(std::ostream& os, const AsUC32& c);
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/parsing/expression-classifier.h b/src/parsing/expression-classifier.h
index 96ccf87..fa1a2f9 100644
--- a/src/parsing/expression-classifier.h
+++ b/src/parsing/expression-classifier.h
@@ -13,6 +13,7 @@
 namespace internal {
 
 
+template <typename Traits>
 class ExpressionClassifier {
  public:
   struct Error {
@@ -55,15 +56,25 @@
 
   enum FunctionProperties { NonSimpleParameter = 1 << 0 };
 
-  ExpressionClassifier()
-      : invalid_productions_(0),
+  explicit ExpressionClassifier(const Traits* t)
+      : zone_(t->zone()),
+        non_patterns_to_rewrite_(t->GetNonPatternList()),
+        invalid_productions_(0),
         function_properties_(0),
-        duplicate_finder_(nullptr) {}
+        duplicate_finder_(nullptr) {
+    non_pattern_begin_ = non_patterns_to_rewrite_->length();
+  }
 
-  explicit ExpressionClassifier(DuplicateFinder* duplicate_finder)
-      : invalid_productions_(0),
+  ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
+      : zone_(t->zone()),
+        non_patterns_to_rewrite_(t->GetNonPatternList()),
+        invalid_productions_(0),
         function_properties_(0),
-        duplicate_finder_(duplicate_finder) {}
+        duplicate_finder_(duplicate_finder) {
+    non_pattern_begin_ = non_patterns_to_rewrite_->length();
+  }
+
+  ~ExpressionClassifier() { Discard(); }
 
   bool is_valid(unsigned productions) const {
     return (invalid_productions_ & productions) == 0;
@@ -281,12 +292,14 @@
     assignment_pattern_error_ = Error();
   }
 
-  void Accumulate(const ExpressionClassifier& inner,
-                  unsigned productions = StandardProductions) {
+  void Accumulate(ExpressionClassifier* inner,
+                  unsigned productions = StandardProductions,
+                  bool merge_non_patterns = true) {
+    if (merge_non_patterns) MergeNonPatterns(inner);
     // Propagate errors from inner, but don't overwrite already recorded
     // errors.
     unsigned non_arrow_inner_invalid_productions =
-        inner.invalid_productions_ & ~ArrowFormalParametersProduction;
+        inner->invalid_productions_ & ~ArrowFormalParametersProduction;
     if (non_arrow_inner_invalid_productions == 0) return;
     unsigned non_arrow_productions =
         productions & ~ArrowFormalParametersProduction;
@@ -296,27 +309,27 @@
     if (errors != 0) {
       invalid_productions_ |= errors;
       if (errors & ExpressionProduction)
-        expression_error_ = inner.expression_error_;
+        expression_error_ = inner->expression_error_;
       if (errors & FormalParameterInitializerProduction)
         formal_parameter_initializer_error_ =
-            inner.formal_parameter_initializer_error_;
+            inner->formal_parameter_initializer_error_;
       if (errors & BindingPatternProduction)
-        binding_pattern_error_ = inner.binding_pattern_error_;
+        binding_pattern_error_ = inner->binding_pattern_error_;
       if (errors & AssignmentPatternProduction)
-        assignment_pattern_error_ = inner.assignment_pattern_error_;
+        assignment_pattern_error_ = inner->assignment_pattern_error_;
       if (errors & DistinctFormalParametersProduction)
         duplicate_formal_parameter_error_ =
-            inner.duplicate_formal_parameter_error_;
+            inner->duplicate_formal_parameter_error_;
       if (errors & StrictModeFormalParametersProduction)
         strict_mode_formal_parameter_error_ =
-            inner.strict_mode_formal_parameter_error_;
+            inner->strict_mode_formal_parameter_error_;
       if (errors & StrongModeFormalParametersProduction)
         strong_mode_formal_parameter_error_ =
-            inner.strong_mode_formal_parameter_error_;
+            inner->strong_mode_formal_parameter_error_;
       if (errors & LetPatternProduction)
-        let_pattern_error_ = inner.let_pattern_error_;
+        let_pattern_error_ = inner->let_pattern_error_;
       if (errors & CoverInitializedNameProduction)
-        cover_initialized_name_error_ = inner.cover_initialized_name_error_;
+        cover_initialized_name_error_ = inner->cover_initialized_name_error_;
     }
 
     // As an exception to the above, the result continues to be a valid arrow
@@ -325,16 +338,31 @@
         is_valid_arrow_formal_parameters()) {
       // Also copy function properties if expecting an arrow function
       // parameter.
-      function_properties_ |= inner.function_properties_;
+      function_properties_ |= inner->function_properties_;
 
-      if (!inner.is_valid_binding_pattern()) {
+      if (!inner->is_valid_binding_pattern()) {
         invalid_productions_ |= ArrowFormalParametersProduction;
-        arrow_formal_parameters_error_ = inner.binding_pattern_error_;
+        arrow_formal_parameters_error_ = inner->binding_pattern_error_;
       }
     }
   }
 
+  V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
+
+  V8_INLINE void Discard() {
+    DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
+    non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
+  }
+
+  V8_INLINE void MergeNonPatterns(ExpressionClassifier* inner) {
+    DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
+    inner->non_pattern_begin_ = inner->non_patterns_to_rewrite_->length();
+  }
+
  private:
+  Zone* zone_;
+  ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
+  int non_pattern_begin_;
   unsigned invalid_productions_;
   unsigned function_properties_;
   Error expression_error_;
@@ -350,6 +378,7 @@
   DuplicateFinder* duplicate_finder_;
 };
 
+
 }  // namespace internal
 }  // namespace v8
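
The new non_pattern_begin_ / Discard() / MergeNonPatterns() trio implements a
mark-and-rewind protocol on the shared non_patterns_to_rewrite_ list: each
classifier marks the list length at construction, Discard() rewinds to that
mark (the destructor's default), and MergeNonPatterns() hands the inner
classifier's entries to the outer one by pushing the inner mark forward.
ParseAssignmentExpression in parser-base.h below drives this with
Accumulate(..., false) followed by either MergeNonPatterns() or Discard(). A
minimal sketch of the protocol with a plain vector (illustrative; int stands
in for the recorded expressions):

#include <cassert>
#include <cstddef>
#include <vector>

struct ClassifierSketch {
  std::vector<int>* pending;
  std::size_t begin;

  explicit ClassifierSketch(std::vector<int>* list)
      : pending(list), begin(list->size()) {}

  // Analogue of Discard(): rewind to the mark taken at construction.
  void Discard() { pending->resize(begin); }

  // Analogue of MergeNonPatterns(): keep the inner classifier's entries by
  // pushing its mark past them, so its later Discard() is a no-op.
  void MergeNonPatterns(ClassifierSketch* inner) {
    inner->begin = inner->pending->size();
  }
};

int main() {
  std::vector<int> pending;
  ClassifierSketch outer(&pending);
  {
    ClassifierSketch inner(&pending);
    pending.push_back(1);            // recorded while the inner classifier is live
    outer.MergeNonPatterns(&inner);  // adopt the entry
    inner.Discard();                 // now a no-op
  }
  assert(pending.size() == 1);       // the entry survives for the outer classifier
  return 0;
}
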
 
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index d9da445..6be19b3 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -88,6 +88,7 @@
   typedef typename Traits::Type::Literal LiteralT;
   typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
   typedef typename Traits::Type::StatementList StatementListT;
+  typedef typename Traits::Type::ExpressionClassifier ExpressionClassifier;
 
   ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
              v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -116,7 +117,8 @@
         allow_strong_mode_(false),
         allow_legacy_const_(true),
         allow_harmony_do_expressions_(false),
-        allow_harmony_function_name_(false) {}
+        allow_harmony_function_name_(false),
+        allow_harmony_function_sent_(false) {}
 
 #define ALLOW_ACCESSORS(name)                           \
   bool allow_##name() const { return allow_##name##_; } \
@@ -134,6 +136,7 @@
   ALLOW_ACCESSORS(legacy_const);
   ALLOW_ACCESSORS(harmony_do_expressions);
   ALLOW_ACCESSORS(harmony_function_name);
+  ALLOW_ACCESSORS(harmony_function_sent);
 #undef ALLOW_ACCESSORS
 
   uintptr_t stack_limit() const { return stack_limit_; }
@@ -242,11 +245,37 @@
       return destructuring_assignments_to_rewrite_;
     }
 
+    List<ExpressionT>& expressions_in_tail_position() {
+      return expressions_in_tail_position_;
+    }
+    void AddExpressionInTailPosition(ExpressionT expression) {
+      if (collect_expressions_in_tail_position_) {
+        expressions_in_tail_position_.Add(expression);
+      }
+    }
+
+    bool collect_expressions_in_tail_position() const {
+      return collect_expressions_in_tail_position_;
+    }
+    void set_collect_expressions_in_tail_position(bool collect) {
+      collect_expressions_in_tail_position_ = collect;
+    }
+
+    ZoneList<ExpressionT>* non_patterns_to_rewrite() {
+      return &non_patterns_to_rewrite_;
+    }
+
+   private:
     void AddDestructuringAssignment(DestructuringAssignment pair) {
       destructuring_assignments_to_rewrite_.Add(pair);
     }
 
-   private:
+    V8_INLINE Scope* scope() { return *scope_stack_; }
+
+    void AddNonPatternForRewriting(ExpressionT expr) {
+      non_patterns_to_rewrite_.Add(expr, (*scope_stack_)->zone());
+    }
+
     // Used to assign an index to each literal that needs materialization in
     // the function.  Includes regexp literals, and boilerplate for object and
     // array literals.
@@ -276,12 +305,14 @@
     Scope* outer_scope_;
 
     List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
-
-    void RewriteDestructuringAssignments();
+    List<ExpressionT> expressions_in_tail_position_;
+    bool collect_expressions_in_tail_position_;
+    ZoneList<ExpressionT> non_patterns_to_rewrite_;
 
     typename Traits::Type::Factory* factory_;
 
     friend class ParserTraits;
+    friend class PreParserTraits;
     friend class Checkpoint;
   };
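
The FunctionState additions above include a switchable collector:
AddExpressionInTailPosition records an expression only while
collect_expressions_in_tail_position_ is true, presumably so callers can
temporarily disable collection in contexts that cannot contain tail calls. A
tiny sketch of that guard (int stands in for ExpressionT):

#include <cassert>
#include <vector>

class TailCollectorSketch {
 public:
  void AddExpressionInTailPosition(int expression) {
    if (collect_) expressions_.push_back(expression);
  }
  void set_collect_expressions_in_tail_position(bool collect) {
    collect_ = collect;
  }
  const std::vector<int>& expressions_in_tail_position() const {
    return expressions_;
  }

 private:
  std::vector<int> expressions_;
  bool collect_ = true;  // the FunctionState constructor defaults this to true
};

int main() {
  TailCollectorSketch state;
  state.AddExpressionInTailPosition(1);  // recorded
  state.set_collect_expressions_in_tail_position(false);
  state.AddExpressionInTailPosition(2);  // ignored while collection is off
  assert(state.expressions_in_tail_position().size() == 1);
  return 0;
}
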
 
@@ -436,6 +467,9 @@
            scanner()->is_next_contextual_keyword(keyword);
   }
 
+  void ExpectMetaProperty(Vector<const char> property_name,
+                          const char* full_name, int pos, bool* ok);
+
   void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
     Expect(Token::IDENTIFIER, ok);
     if (!*ok) return;
@@ -461,6 +495,10 @@
     return false;
   }
 
+  bool PeekInOrOf() {
+    return peek() == Token::IN || PeekContextualKeyword(CStrVector("of"));
+  }
+
   // Checks whether an octal literal was last seen between beg_pos and end_pos.
   // If so, reports an error. Only called for strict mode and template strings.
   void CheckOctalLiteral(int beg_pos, int end_pos,
@@ -563,8 +601,8 @@
       Scanner::Location location, Token::Value token,
       MessageTemplate::Template message = MessageTemplate::kUnexpectedToken);
 
-
-  void ReportClassifierError(const ExpressionClassifier::Error& error) {
+  void ReportClassifierError(
+      const typename ExpressionClassifier::Error& error) {
     Traits::ReportMessageAt(error.location, error.message, error.arg,
                             error.type);
   }
@@ -642,7 +680,7 @@
       // neither a valid binding pattern nor a valid parenthesized formal
       // parameter list, show the "arrow formal parameters" error if the formals
       // started with a parenthesis, and the binding pattern error otherwise.
-      const ExpressionClassifier::Error& error =
+      const typename ExpressionClassifier::Error& error =
           parenthesized_formals ? classifier->arrow_formal_parameters_error()
                                 : classifier->binding_pattern_error();
       ReportClassifierError(error);
@@ -715,10 +753,6 @@
   }
 
   IdentifierT ParseIdentifierName(bool* ok);
-  // Parses an identifier and determines whether or not it is 'get' or 'set'.
-  IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get, bool* is_set,
-                                            bool* ok);
-
 
   ExpressionT ParseRegExpLiteral(bool seen_equal,
                                  ExpressionClassifier* classifier, bool* ok);
@@ -728,12 +762,9 @@
   ExpressionT ParseExpression(bool accept_IN, bool* ok);
   ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
                               bool* ok);
-  ExpressionT ParseExpression(bool accept_IN, int flags,
-                              ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
-                                bool* is_static, bool* is_computed_name,
-                                bool* is_identifier, bool* is_escaped_keyword,
+                                bool* is_computed_name,
                                 ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
   ObjectLiteralPropertyT ParsePropertyDefinition(
@@ -744,21 +775,9 @@
       Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
       bool* ok);
 
-  enum AssignmentExpressionFlags {
-    kIsNormalAssignment = 0,
-    kIsPossiblePatternElement = 1 << 0,
-    kIsPossibleArrowFormals = 1 << 1
-  };
-
-  ExpressionT ParseAssignmentExpression(bool accept_IN, int flags,
-                                        ExpressionClassifier* classifier,
-                                        bool* ok);
   ExpressionT ParseAssignmentExpression(bool accept_IN,
                                         ExpressionClassifier* classifier,
-                                        bool* ok) {
-    return ParseAssignmentExpression(accept_IN, kIsNormalAssignment, classifier,
-                                     ok);
-  }
+                                        bool* ok);
   ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
   ExpressionT ParseConditionalExpression(bool accept_IN,
                                          ExpressionClassifier* classifier,
@@ -794,9 +813,9 @@
                             ExpressionClassifier* classifier, bool* ok);
   void ParseFormalParameterList(FormalParametersT* parameters,
                                 ExpressionClassifier* classifier, bool* ok);
-  void CheckArityRestrictions(
-      int param_count, FunctionLiteral::ArityRestriction arity_restriction,
-      bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok);
+  void CheckArityRestrictions(int param_count, FunctionKind function_type,
+                              bool has_rest, int formals_start_pos,
+                              int formals_end_pos, bool* ok);
 
   bool IsNextLetKeyword();
 
@@ -829,6 +848,10 @@
     return true;
   }
 
+  bool IsValidPattern(ExpressionT expression) {
+    return expression->IsObjectLiteral() || expression->IsArrayLiteral();
+  }
+
   // Keep track of eval() calls since they disable all local variable
   // optimizations. This checks whether the expression is an eval call and,
   // if so, forwards the information to the scope.
@@ -932,9 +955,9 @@
   bool allow_legacy_const_;
   bool allow_harmony_do_expressions_;
   bool allow_harmony_function_name_;
+  bool allow_harmony_function_sent_;
 };
 
-
 template <class Traits>
 ParserBase<Traits>::FunctionState::FunctionState(
     FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
@@ -950,6 +973,8 @@
       outer_function_state_(*function_state_stack),
       scope_stack_(scope_stack),
       outer_scope_(*scope_stack),
+      collect_expressions_in_tail_position_(true),
+      non_patterns_to_rewrite_(0, scope->zone()),
       factory_(factory) {
   *scope_stack_ = scope;
   *function_state_stack = this;
@@ -967,7 +992,6 @@
 void ParserBase<Traits>::GetUnexpectedTokenMessage(
     Token::Value token, MessageTemplate::Template* message, const char** arg,
     MessageTemplate::Template default_) {
-  // Four of the tokens are treated specially
   switch (token) {
     case Token::EOS:
       *message = MessageTemplate::kUnexpectedEOS;
@@ -1037,7 +1061,7 @@
 template <class Traits>
 typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
     AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   auto result = ParseAndClassifyIdentifier(&classifier, ok);
   if (!*ok) return Traits::EmptyIdentifier();
 
@@ -1091,10 +1115,8 @@
           scanner()->location(), MessageTemplate::kStrongUndefined);
       if (is_strong(language_mode())) {
         // TODO(dslomov): allow 'undefined' in nested patterns.
-        classifier->RecordBindingPatternError(
-            scanner()->location(), MessageTemplate::kStrongUndefined);
-        classifier->RecordAssignmentPatternError(
-            scanner()->location(), MessageTemplate::kStrongUndefined);
+        classifier->RecordPatternError(scanner()->location(),
+                                       MessageTemplate::kStrongUndefined);
       }
     }
 
@@ -1116,7 +1138,9 @@
       *ok = false;
       return Traits::EmptyIdentifier();
     }
-    if (next == Token::LET) {
+    if (next == Token::LET ||
+        (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
+         scanner()->is_literal_contextual_keyword(CStrVector("let")))) {
       classifier->RecordLetPatternError(scanner()->location(),
                                         MessageTemplate::kLetInLexicalBinding);
     }
@@ -1172,18 +1196,6 @@
 
 
 template <class Traits>
-typename ParserBase<Traits>::IdentifierT
-ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
-                                                  bool* is_set,
-                                                  bool* ok) {
-  IdentifierT result = ParseIdentifierName(ok);
-  if (!*ok) return Traits::EmptyIdentifier();
-  scanner()->IsGetOrSet(is_get, is_set);
-  return result;
-}
-
-
-template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
     bool seen_equal, ExpressionClassifier* classifier, bool* ok) {
   int pos = peek_position();
@@ -1269,8 +1281,7 @@
       return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
     case Token::SMI:
     case Token::NUMBER:
-      classifier->RecordBindingPatternError(
-          scanner()->peek_location(), MessageTemplate::kUnexpectedTokenNumber);
+      BindingPatternUnexpectedToken(classifier);
       return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
 
     case Token::IDENTIFIER:
@@ -1286,8 +1297,7 @@
     }
 
     case Token::STRING: {
-      classifier->RecordBindingPatternError(
-          scanner()->peek_location(), MessageTemplate::kUnexpectedTokenString);
+      BindingPatternUnexpectedToken(classifier);
       Consume(Token::STRING);
       return this->ExpressionFromString(beg_pos, scanner(), factory());
     }
@@ -1323,7 +1333,9 @@
       if (!classifier->is_valid_binding_pattern()) {
         ArrowFormalParametersUnexpectedToken(classifier);
       }
-      BindingPatternUnexpectedToken(classifier);
+      classifier->RecordPatternError(scanner()->peek_location(),
+                                     MessageTemplate::kUnexpectedToken,
+                                     Token::String(Token::LPAREN));
       Consume(Token::LPAREN);
       if (Check(Token::RPAREN)) {
         // ()=>x.  The continuation that looks for the => is in
@@ -1331,20 +1343,23 @@
         classifier->RecordExpressionError(scanner()->location(),
                                           MessageTemplate::kUnexpectedToken,
                                           Token::String(Token::RPAREN));
-        classifier->RecordBindingPatternError(scanner()->location(),
-                                              MessageTemplate::kUnexpectedToken,
-                                              Token::String(Token::RPAREN));
         return factory()->NewEmptyParentheses(beg_pos);
       } else if (Check(Token::ELLIPSIS)) {
         // (...x)=>x.  The continuation that looks for the => is in
         // ParseAssignmentExpression.
         int ellipsis_pos = position();
+        int expr_pos = peek_position();
         classifier->RecordExpressionError(scanner()->location(),
                                           MessageTemplate::kUnexpectedToken,
                                           Token::String(Token::ELLIPSIS));
         classifier->RecordNonSimpleParameter();
         ExpressionT expr =
             this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+        if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
+          classifier->RecordArrowFormalParametersError(
+              Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
+              MessageTemplate::kInvalidRestParameter);
+        }
         if (peek() == Token::COMMA) {
           ReportMessageAt(scanner()->peek_location(),
                           MessageTemplate::kParamAfterRest);
@@ -1352,17 +1367,13 @@
           return this->EmptyExpression();
         }
         Expect(Token::RPAREN, CHECK_OK);
-        return factory()->NewSpread(expr, ellipsis_pos);
+        return factory()->NewSpread(expr, ellipsis_pos, expr_pos);
       }
       // Heuristically try to detect immediately called functions before
       // seeing the call parentheses.
       parenthesized_function_ = (peek() == Token::FUNCTION);
-      ExpressionT expr = this->ParseExpression(true, kIsPossibleArrowFormals,
-                                               classifier, CHECK_OK);
+      ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
       Expect(Token::RPAREN, CHECK_OK);
-      if (peek() != Token::ARROW) {
-        expr->set_is_parenthesized();
-      }
       return expr;
     }
 
@@ -1390,9 +1401,7 @@
 
     case Token::TEMPLATE_SPAN:
     case Token::TEMPLATE_TAIL:
-      classifier->RecordBindingPatternError(
-          scanner()->peek_location(),
-          MessageTemplate::kUnexpectedTemplateString);
+      BindingPatternUnexpectedToken(classifier);
       return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
                                         classifier, ok);
 
@@ -1423,9 +1432,9 @@
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
     bool accept_IN, bool* ok) {
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
-  result = Traits::RewriteNonPattern(result, &classifier, CHECK_OK);
+  Traits::RewriteNonPattern(&classifier, CHECK_OK);
   return result;
 }
 
@@ -1433,21 +1442,14 @@
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
     bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
-  return ParseExpression(accept_IN, kIsNormalAssignment, classifier, ok);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
-    bool accept_IN, int flags, ExpressionClassifier* classifier, bool* ok) {
   // Expression ::
   //   AssignmentExpression
   //   Expression ',' AssignmentExpression
 
-  ExpressionClassifier binding_classifier;
-  ExpressionT result = this->ParseAssignmentExpression(
-      accept_IN, flags, &binding_classifier, CHECK_OK);
-  classifier->Accumulate(binding_classifier,
+  ExpressionClassifier binding_classifier(this);
+  ExpressionT result =
+      this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
+  classifier->Accumulate(&binding_classifier,
                          ExpressionClassifier::AllProductions);
   bool is_simple_parameter_list = this->IsIdentifier(result);
   bool seen_rest = false;
@@ -1469,14 +1471,21 @@
       Consume(Token::ELLIPSIS);
       seen_rest = is_rest = true;
     }
-    int pos = position();
+    int pos = position(), expr_pos = peek_position();
     ExpressionT right = this->ParseAssignmentExpression(
-        accept_IN, flags, &binding_classifier, CHECK_OK);
-    if (is_rest) right = factory()->NewSpread(right, pos);
+        accept_IN, &binding_classifier, CHECK_OK);
+    classifier->Accumulate(&binding_classifier,
+                           ExpressionClassifier::AllProductions);
+    if (is_rest) {
+      if (!this->IsIdentifier(right) && !IsValidPattern(right)) {
+        classifier->RecordArrowFormalParametersError(
+            Scanner::Location(pos, scanner()->location().end_pos),
+            MessageTemplate::kInvalidRestParameter);
+      }
+      right = factory()->NewSpread(right, pos, expr_pos);
+    }
     is_simple_parameter_list =
         is_simple_parameter_list && this->IsIdentifier(right);
-    classifier->Accumulate(binding_classifier,
-                           ExpressionClassifier::AllProductions);
     result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
   }
   if (!is_simple_parameter_list || seen_rest) {
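
Both new validation sites above apply the same rule: the expression after
`...` in a candidate arrow parameter list must be a lone identifier or a
destructuring pattern, otherwise kInvalidRestParameter is recorded against
the arrow-formals production (so the error only surfaces if the list really
ends up as arrow formals). A toy version of the predicate (the enum is
hypothetical; it mirrors this->IsIdentifier(expr) || IsValidPattern(expr)):

#include <cassert>

enum class ExprKindSketch { kIdentifier, kObjectLiteral, kArrayLiteral, kCall };

bool IsValidRestParameterSketch(ExprKindSketch kind) {
  return kind == ExprKindSketch::kIdentifier ||
         kind == ExprKindSketch::kObjectLiteral ||
         kind == ExprKindSketch::kArrayLiteral;
}

int main() {
  assert(IsValidRestParameterSketch(ExprKindSketch::kIdentifier));    // (...x) => x
  assert(IsValidRestParameterSketch(ExprKindSketch::kArrayLiteral));  // (...[a, b]) => a
  assert(!IsValidRestParameterSketch(ExprKindSketch::kCall));         // (...f()) => 0: error
  return 0;
}
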
@@ -1511,9 +1520,10 @@
     } else if (peek() == Token::ELLIPSIS) {
       int start_pos = peek_position();
       Consume(Token::ELLIPSIS);
+      int expr_pos = peek_position();
       ExpressionT argument =
           this->ParseAssignmentExpression(true, classifier, CHECK_OK);
-      elem = factory()->NewSpread(argument, start_pos);
+      elem = factory()->NewSpread(argument, start_pos, expr_pos);
 
       if (first_spread_index < 0) {
         first_spread_index = values->length();
@@ -1534,8 +1544,10 @@
             MessageTemplate::kElementAfterRest);
       }
     } else {
-      elem = this->ParseAssignmentExpression(true, kIsPossiblePatternElement,
-                                             classifier, CHECK_OK);
+      int beg_pos = peek_position();
+      elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+      CheckDestructuringElement(elem, classifier, beg_pos,
+                                scanner()->location().end_pos);
     }
     values->Add(elem, zone_);
     if (peek() != Token::RBRACK) {
@@ -1547,15 +1559,20 @@
   // Update the scope information before the pre-parsing bailout.
   int literal_index = function_state_->NextMaterializedLiteralIndex();
 
-  return factory()->NewArrayLiteral(values, first_spread_index, literal_index,
-                                    is_strong(language_mode()), pos);
+  ExpressionT result =
+      factory()->NewArrayLiteral(values, first_spread_index, literal_index,
+                                 is_strong(language_mode()), pos);
+  if (first_spread_index >= 0) {
+    result = factory()->NewRewritableExpression(result);
+    Traits::QueueNonPatternForRewriting(result);
+  }
+  return result;
 }
 
 
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
-    IdentifierT* name, bool* is_get, bool* is_set, bool* is_static,
-    bool* is_computed_name, bool* is_identifier, bool* is_escaped_keyword,
+    IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
     ExpressionClassifier* classifier, bool* ok) {
   Token::Value token = peek();
   int pos = peek_position();
@@ -1588,29 +1605,19 @@
     case Token::LBRACK: {
       *is_computed_name = true;
       Consume(Token::LBRACK);
-      ExpressionClassifier computed_name_classifier;
+      ExpressionClassifier computed_name_classifier(this);
       ExpressionT expression =
           ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
-      expression = Traits::RewriteNonPattern(
-          expression, &computed_name_classifier, CHECK_OK);
-      classifier->Accumulate(computed_name_classifier,
+      Traits::RewriteNonPattern(&computed_name_classifier, CHECK_OK);
+      classifier->Accumulate(&computed_name_classifier,
                              ExpressionClassifier::ExpressionProductions);
       Expect(Token::RBRACK, CHECK_OK);
       return expression;
     }
 
-    case Token::ESCAPED_KEYWORD:
-      *is_escaped_keyword = true;
-      *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
-      break;
-
-    case Token::STATIC:
-      *is_static = true;
-
-    // Fall through.
     default:
-      *is_identifier = true;
-      *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
+      *name = ParseIdentifierName(CHECK_OK);
+      scanner()->IsGetOrSet(is_get, is_set);
       break;
   }
 
@@ -1631,27 +1638,19 @@
   ExpressionT value = this->EmptyExpression();
   bool is_get = false;
   bool is_set = false;
-  bool name_is_static = false;
   bool is_generator = Check(Token::MUL);
 
   Token::Value name_token = peek();
   int next_beg_pos = scanner()->peek_location().beg_pos;
   int next_end_pos = scanner()->peek_location().end_pos;
-  bool is_identifier = false;
-  bool is_escaped_keyword = false;
-  ExpressionT name_expression = ParsePropertyName(
-      name, &is_get, &is_set, &name_is_static, is_computed_name, &is_identifier,
-      &is_escaped_keyword, classifier,
-      CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+  ExpressionT name_expression =
+      ParsePropertyName(name, &is_get, &is_set, is_computed_name, classifier,
+                        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
   if (fni_ != nullptr && !*is_computed_name) {
     this->PushLiteralName(fni_, *name);
   }
 
-  bool escaped_static =
-      is_escaped_keyword &&
-      scanner()->is_literal_contextual_keyword(CStrVector("static"));
-
   if (!in_class && !is_generator) {
     DCHECK(!is_static);
 
@@ -1663,15 +1662,18 @@
                                CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
       }
       Consume(Token::COLON);
+      int beg_pos = peek_position();
       value = this->ParseAssignmentExpression(
-          true, kIsPossiblePatternElement, classifier,
-          CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+          true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+      CheckDestructuringElement(value, classifier, beg_pos,
+                                scanner()->location().end_pos);
 
       return factory()->NewObjectLiteralProperty(name_expression, value, false,
                                                  *is_computed_name);
     }
 
-    if ((is_identifier || is_escaped_keyword) &&
+    if (Token::IsIdentifier(name_token, language_mode(),
+                            this->is_generator()) &&
         (peek() == Token::COMMA || peek() == Token::RBRACE ||
          peek() == Token::ASSIGN)) {
       // PropertyDefinition
@@ -1680,14 +1682,6 @@
       //
       // CoverInitializedName
       //    IdentifierReference Initializer?
-      if (!Token::IsIdentifier(name_token, language_mode(),
-                               this->is_generator())) {
-        if (!escaped_static) {
-          ReportUnexpectedTokenAt(scanner()->location(), name_token);
-          *ok = false;
-          return this->EmptyObjectLiteralProperty();
-        }
-      }
       if (classifier->duplicate_finder() != nullptr &&
           scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
         classifier->RecordDuplicateFormalParameterError(scanner()->location());
@@ -1703,18 +1697,22 @@
 
       if (peek() == Token::ASSIGN) {
         Consume(Token::ASSIGN);
-        ExpressionClassifier rhs_classifier;
+        ExpressionClassifier rhs_classifier(this);
         ExpressionT rhs = this->ParseAssignmentExpression(
             true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-        rhs = Traits::RewriteNonPattern(
-            rhs, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-        classifier->Accumulate(rhs_classifier,
+        Traits::RewriteNonPattern(&rhs_classifier,
+                                  CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        classifier->Accumulate(&rhs_classifier,
                                ExpressionClassifier::ExpressionProductions);
         value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
                                          RelocInfo::kNoPosition);
         classifier->RecordCoverInitializedNameError(
             Scanner::Location(next_beg_pos, scanner()->location().end_pos),
             MessageTemplate::kInvalidCoverInitializedName);
+
+        if (allow_harmony_function_name()) {
+          Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
+        }
       } else {
         value = lhs;
       }
@@ -1725,12 +1723,6 @@
     }
   }
 
-  if (in_class && escaped_static && !is_static) {
-    ReportUnexpectedTokenAt(scanner()->location(), name_token);
-    *ok = false;
-    return this->EmptyObjectLiteralProperty();
-  }
-
   // Method definitions are never valid in patterns.
   classifier->RecordPatternError(
       Scanner::Location(next_beg_pos, scanner()->location().end_pos),
@@ -1755,28 +1747,24 @@
                          : FunctionKind::kBaseConstructor;
     }
 
-    if (!in_class) kind = WithObjectLiteralBit(kind);
-
     value = this->ParseFunctionLiteral(
         *name, scanner()->location(), kSkipFunctionNameCheck, kind,
-        RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
-        FunctionLiteral::kNormalArity, language_mode(),
-        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
+        language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
     return factory()->NewObjectLiteralProperty(name_expression, value,
                                                ObjectLiteralProperty::COMPUTED,
                                                is_static, *is_computed_name);
   }
 
-  if (in_class && name_is_static && !is_static) {
+  if (in_class && name_token == Token::STATIC && !is_static) {
     // ClassElement (static)
     //    'static' MethodDefinition
     *name = this->EmptyIdentifier();
     ObjectLiteralPropertyT property = ParsePropertyDefinition(
         checker, true, has_extends, true, is_computed_name, nullptr, classifier,
         name, ok);
-    property = Traits::RewriteNonPatternObjectLiteralProperty(property,
-                                                              classifier, ok);
+    Traits::RewriteNonPattern(classifier, ok);
     return property;
   }
 
@@ -1789,8 +1777,8 @@
     name_token = peek();
 
     name_expression = ParsePropertyName(
-        name, &dont_care, &dont_care, &dont_care, is_computed_name, &dont_care,
-        &dont_care, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+        name, &dont_care, &dont_care, is_computed_name, classifier,
+        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
     if (!*is_computed_name) {
       checker->CheckProperty(name_token, kAccessorProperty, is_static,
@@ -1798,12 +1786,10 @@
                              CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
     }
 
-    FunctionKind kind = FunctionKind::kAccessorFunction;
-    if (!in_class) kind = WithObjectLiteralBit(kind);
     typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
-        *name, scanner()->location(), kSkipFunctionNameCheck, kind,
-        RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
-        is_get ? FunctionLiteral::kGetterArity : FunctionLiteral::kSetterArity,
+        *name, scanner()->location(), kSkipFunctionNameCheck,
+        is_get ? FunctionKind::kGetterFunction : FunctionKind::kSetterFunction,
+        RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
         language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
     // Make sure the name expression is a string since we need a Name for
@@ -1913,17 +1899,17 @@
   while (!done) {
     int start_pos = peek_position();
     bool is_spread = Check(Token::ELLIPSIS);
+    int expr_pos = peek_position();
 
     ExpressionT argument = this->ParseAssignmentExpression(
         true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
-    argument = Traits::RewriteNonPattern(argument, classifier,
-                                         CHECK_OK_CUSTOM(NullExpressionList));
+    Traits::RewriteNonPattern(classifier, CHECK_OK_CUSTOM(NullExpressionList));
     if (is_spread) {
       if (!spread_arg.IsValid()) {
         spread_arg.beg_pos = start_pos;
         spread_arg.end_pos = peek_position();
       }
-      argument = factory()->NewSpread(argument, start_pos);
+      argument = factory()->NewSpread(argument, start_pos, expr_pos);
     }
     result->Add(argument, zone_);
 
@@ -1967,7 +1953,7 @@
 // Precedence = 2
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
                                               ExpressionClassifier* classifier,
                                               bool* ok) {
   // AssignmentExpression ::
@@ -1975,8 +1961,6 @@
   //   ArrowFunction
   //   YieldExpression
   //   LeftHandSideExpression AssignmentOperator AssignmentExpression
-  bool maybe_pattern_element = flags & kIsPossiblePatternElement;
-  bool maybe_arrow_formals = flags & kIsPossibleArrowFormals;
   bool is_destructuring_assignment = false;
   int lhs_beg_pos = peek_position();
 
@@ -1986,7 +1970,8 @@
 
   FuncNameInferrer::State fni_state(fni_);
   ParserBase<Traits>::Checkpoint checkpoint(this);
-  ExpressionClassifier arrow_formals_classifier(classifier->duplicate_finder());
+  ExpressionClassifier arrow_formals_classifier(this,
+                                                classifier->duplicate_finder());
   bool parenthesized_formals = peek() == Token::LPAREN;
   if (!parenthesized_formals) {
     ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
@@ -1994,7 +1979,9 @@
   ExpressionT expression = this->ParseConditionalExpression(
       accept_IN, &arrow_formals_classifier, CHECK_OK);
   if (peek() == Token::ARROW) {
-    BindingPatternUnexpectedToken(classifier);
+    classifier->RecordPatternError(scanner()->peek_location(),
+                                   MessageTemplate::kUnexpectedToken,
+                                   Token::String(Token::ARROW));
     ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
                                   parenthesized_formals, CHECK_OK);
     Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
@@ -2022,11 +2009,6 @@
     }
     expression = this->ParseArrowFunctionLiteral(
         accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
-    if (maybe_pattern_element) {
-      classifier->RecordPatternError(
-          Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
-          MessageTemplate::kInvalidDestructuringTarget);
-    }
 
     if (fni_ != nullptr) fni_->Infer();
 
@@ -2039,44 +2021,42 @@
 
   // "expression" was not itself an arrow function parameter list, but it might
   // form part of one.  Propagate speculative formal parameter error locations.
+  // Do not merge pending non-pattern expressions yet!
   classifier->Accumulate(
-      arrow_formals_classifier,
+      &arrow_formals_classifier,
       ExpressionClassifier::StandardProductions |
-          ExpressionClassifier::FormalParametersProductions |
-          ExpressionClassifier::CoverInitializedNameProduction);
-
-  bool maybe_pattern =
-      (expression->IsObjectLiteral() || expression->IsArrayLiteral()) &&
-      !expression->is_parenthesized();
+      ExpressionClassifier::FormalParametersProductions |
+      ExpressionClassifier::CoverInitializedNameProduction,
+      false);
 
   if (!Token::IsAssignmentOp(peek())) {
     // Parsed conditional expression only (no assignment).
-    if (maybe_pattern_element) {
-      CheckDestructuringElement(expression, classifier, lhs_beg_pos,
-                                scanner()->location().end_pos);
-    }
+    // Now pending non-pattern expressions must be merged.
+    classifier->MergeNonPatterns(&arrow_formals_classifier);
     return expression;
   }
 
+  // Now pending non-pattern expressions must be discarded.
+  arrow_formals_classifier.Discard();
+
   if (!(allow_harmony_destructuring_bind() ||
         allow_harmony_default_parameters())) {
     BindingPatternUnexpectedToken(classifier);
   }
 
-  if (allow_harmony_destructuring_assignment() && maybe_pattern &&
+  if (allow_harmony_destructuring_assignment() && IsValidPattern(expression) &&
       peek() == Token::ASSIGN) {
     classifier->ForgiveCoverInitializedNameError();
     ValidateAssignmentPattern(classifier, CHECK_OK);
     is_destructuring_assignment = true;
-  } else if (maybe_arrow_formals) {
+  } else if (allow_harmony_default_parameters() &&
+             !allow_harmony_destructuring_assignment()) {
+    // TODO(adamk): This branch should be removed once the destructuring
+    // assignment and default parameter flags are removed.
     expression = this->ClassifyAndRewriteReferenceExpression(
         classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInAssignment);
   } else {
-    if (maybe_pattern_element) {
-      CheckDestructuringElement(expression, classifier, lhs_beg_pos,
-                                scanner()->location().end_pos);
-    }
     expression = this->CheckAndRewriteReferenceExpression(
         expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
@@ -2086,20 +2066,20 @@
 
   Token::Value op = Next();  // Get assignment operator.
   if (op != Token::ASSIGN) {
-    classifier->RecordBindingPatternError(scanner()->location(),
-                                          MessageTemplate::kUnexpectedToken,
-                                          Token::String(op));
+    classifier->RecordPatternError(scanner()->location(),
+                                   MessageTemplate::kUnexpectedToken,
+                                   Token::String(op));
   }
   int pos = position();
 
-  ExpressionClassifier rhs_classifier;
+  ExpressionClassifier rhs_classifier(this);
 
   ExpressionT right =
       this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
-  right = Traits::RewriteNonPattern(right, &rhs_classifier, CHECK_OK);
+  Traits::RewriteNonPattern(&rhs_classifier, CHECK_OK);
   classifier->Accumulate(
-      rhs_classifier, ExpressionClassifier::ExpressionProductions |
-                          ExpressionClassifier::CoverInitializedNameProduction);
+      &rhs_classifier, ExpressionClassifier::ExpressionProductions |
+                       ExpressionClassifier::CoverInitializedNameProduction);
 
   // TODO(1231235): We try to estimate the set of properties set by
   // constructors. We define a new property whenever there is an
@@ -2110,12 +2090,6 @@
     function_state_->AddProperty();
   }
 
-  if (op != Token::ASSIGN && maybe_pattern_element) {
-    classifier->RecordAssignmentPatternError(
-        Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
-        MessageTemplate::kInvalidDestructuringTarget);
-  }
-
   this->CheckAssigningFunctionLiteralToProperty(expression, right);
 
   if (fni_ != NULL) {
@@ -2137,7 +2111,7 @@
   ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
 
   if (is_destructuring_assignment) {
-    result = factory()->NewRewritableAssignmentExpression(result);
+    result = factory()->NewRewritableExpression(result);
     Traits::QueueDestructuringAssignmentForRewriting(result);
   }
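
[Editor's note] The two added lines above wrap a destructuring assignment in a rewritable placeholder node and queue it for a later rewriting pass. A minimal standalone sketch of that wrap-and-queue pattern, using toy types rather than V8's AST (RewritableExpr and rewrite_queue are hypothetical names):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Toy stand-in for an AST node; V8's real classes are far richer.
    struct Expr {
      std::string text;
    };

    // Placeholder that can swap its payload during a later rewrite pass.
    struct RewritableExpr {
      Expr* expr;
      bool rewritten = false;
      void Rewrite(Expr* replacement) { expr = replacement; rewritten = true; }
    };

    int main() {
      std::vector<std::unique_ptr<Expr>> arena;
      std::vector<RewritableExpr*> rewrite_queue;

      arena.push_back(std::make_unique<Expr>(Expr{"[a, b] = c"}));
      RewritableExpr wrapper{arena.back().get()};
      rewrite_queue.push_back(&wrapper);  // queued while parsing

      // Later pass: replace each queued node with its desugared form.
      for (RewritableExpr* r : rewrite_queue) {
        arena.push_back(
            std::make_unique<Expr>(Expr{"<desugared " + r->expr->text + ">"}));
        r->Rewrite(arena.back().get());
      }
      std::cout << wrapper.expr->text << "\n";  // <desugared [a, b] = c>
    }
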
 
@@ -2179,16 +2153,12 @@
         // Delegating yields require an RHS; fall through.
       default:
         expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
-        expression =
-            Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         break;
     }
   }
   if (kind == Yield::kDelegating) {
-    // var iterator = subject[Symbol.iterator]();
-    // Hackily disambiguate o from o.next and o [Symbol.iterator]().
-    // TODO(verwaest): Come up with a better solution.
-    expression = this->GetIterator(expression, factory(), pos + 1);
+    return Traits::RewriteYieldStar(generator_object, expression, pos);
   }
   // Hackily disambiguate o from o.next and o [Symbol.iterator]().
   // TODO(verwaest): Come up with a better solution.
@@ -2213,7 +2183,7 @@
   ExpressionT expression =
       this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
   if (peek() != Token::CONDITIONAL) return expression;
-  expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+  Traits::RewriteNonPattern(classifier, CHECK_OK);
   ArrowFormalParametersUnexpectedToken(classifier);
   BindingPatternUnexpectedToken(classifier);
   Consume(Token::CONDITIONAL);
@@ -2221,11 +2191,11 @@
   // expressions we always accept the 'in' keyword; see ECMA-262,
   // section 11.12, page 58.
   ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
-  left = Traits::RewriteNonPattern(left, classifier, CHECK_OK);
+  Traits::RewriteNonPattern(classifier, CHECK_OK);
   Expect(Token::COLON, CHECK_OK);
   ExpressionT right =
       ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
-  right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
+  Traits::RewriteNonPattern(classifier, CHECK_OK);
   return factory()->NewConditional(expression, left, right, pos);
 }
 
@@ -2241,7 +2211,7 @@
   for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
     // prec1 >= 4
     while (Precedence(peek(), accept_IN) == prec1) {
-      x = Traits::RewriteNonPattern(x, classifier, CHECK_OK);
+      Traits::RewriteNonPattern(classifier, CHECK_OK);
       BindingPatternUnexpectedToken(classifier);
       ArrowFormalParametersUnexpectedToken(classifier);
       Token::Value op = Next();
@@ -2249,7 +2219,7 @@
       int pos = position();
       ExpressionT y =
           ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
-      y = Traits::RewriteNonPattern(y, classifier, CHECK_OK);
+      Traits::RewriteNonPattern(classifier, CHECK_OK);
 
       if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
                                                        factory())) {
@@ -2271,13 +2241,15 @@
           ReportMessageAt(op_location, MessageTemplate::kStrongEqual);
           *ok = false;
           return this->EmptyExpression();
+        } else if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
+          x = Traits::RewriteInstanceof(x, y, pos);
+        } else {
+          x = factory()->NewCompareOperation(cmp, x, y, pos);
+          if (cmp != op) {
+            // The comparison was negated - add a NOT.
+            x = factory()->NewUnaryOperation(Token::NOT, x, pos);
+          }
         }
-        x = factory()->NewCompareOperation(cmp, x, y, pos);
-        if (cmp != op) {
-          // The comparison was negated - add a NOT.
-          x = factory()->NewUnaryOperation(Token::NOT, x, pos);
-        }
-
       } else {
         // We have a "normal" binary operation.
         x = factory()->NewBinaryOperation(op, x, y, pos);
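
[Editor's note] ParseBinaryExpression above is a precedence-climbing loop: it keeps folding operands while the next operator's precedence stays at the current level, and recurses at prec1 + 1 for tighter-binding right operands. A minimal self-contained sketch of the same scheme over integer literals and + - * /, with a toy tokenizer instead of V8's scanner:

    #include <cctype>
    #include <iostream>

    // Tiny cursor over an expression like "1+2*3-4".
    struct Cursor { const char* p; };

    int Precedence(char op) {
      switch (op) {
        case '+': case '-': return 1;
        case '*': case '/': return 2;
        default: return 0;  // not a binary operator
      }
    }

    long ParsePrimary(Cursor* c) {
      long v = 0;
      while (std::isdigit(static_cast<unsigned char>(*c->p)))
        v = v * 10 + (*c->p++ - '0');
      return v;
    }

    // Fold all operators whose precedence is >= prec, mirroring the
    // "for (prec1 = ...; prec1 >= prec; prec1--)" structure above.
    long ParseBinary(int prec, Cursor* c) {
      long x = ParsePrimary(c);
      for (int prec1 = Precedence(*c->p); prec1 >= prec; prec1--) {
        while (Precedence(*c->p) == prec1) {
          char op = *c->p++;
          long y = ParseBinary(prec1 + 1, c);  // right side binds tighter
          switch (op) {
            case '+': x += y; break;
            case '-': x -= y; break;
            case '*': x *= y; break;
            case '/': x /= y; break;
          }
        }
      }
      return x;
    }

    int main() {
      Cursor c{"1+2*3-4"};
      std::cout << ParseBinary(1, &c) << "\n";  // prints 3
    }
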
@@ -2312,7 +2284,7 @@
     op = Next();
     int pos = position();
     ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
-    expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+    Traits::RewriteNonPattern(classifier, CHECK_OK);
 
     if (op == Token::DELETE && is_strict(language_mode())) {
       if (is_strong(language_mode())) {
@@ -2339,7 +2311,7 @@
         expression, beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
     this->MarkExpressionAsAssigned(expression);
-    expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+    Traits::RewriteNonPattern(classifier, CHECK_OK);
 
     return factory()->NewCountOperation(op,
                                         true /* prefix */,
@@ -2371,7 +2343,7 @@
         expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
     expression = this->MarkExpressionAsAssigned(expression);
-    expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+    Traits::RewriteNonPattern(classifier, CHECK_OK);
 
     Token::Value next = Next();
     expression =
@@ -2397,19 +2369,20 @@
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
         Consume(Token::LBRACK);
         int pos = position();
         ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
-        index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         result = factory()->NewProperty(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
 
       case Token::LPAREN: {
-        result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
 
@@ -2473,6 +2446,7 @@
       }
 
       case Token::PERIOD: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
         Consume(Token::PERIOD);
@@ -2486,6 +2460,7 @@
 
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
         result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
@@ -2537,7 +2512,7 @@
     } else {
       result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
     }
-    result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
+    Traits::RewriteNonPattern(classifier, CHECK_OK);
     if (peek() == Token::LPAREN) {
       // NewExpression with arguments.
       Scanner::Location spread_pos;
@@ -2584,6 +2559,23 @@
 
     Consume(Token::FUNCTION);
     int function_token_position = position();
+
+    if (allow_harmony_function_sent() && peek() == Token::PERIOD) {
+      // function.sent
+      int pos = position();
+      ExpectMetaProperty(CStrVector("sent"), "function.sent", pos, CHECK_OK);
+
+      if (!is_generator()) {
+        // TODO(neis): allow escaping into closures?
+        ReportMessageAt(scanner()->location(),
+                        MessageTemplate::kUnexpectedFunctionSent);
+        *ok = false;
+        return this->EmptyExpression();
+      }
+
+      return this->FunctionSentExpression(scope_, factory(), pos);
+    }
+
     bool is_generator = Check(Token::MUL);
     IdentifierT name = this->EmptyIdentifier();
     bool is_strict_reserved_name = false;
@@ -2602,8 +2594,7 @@
                                 : kFunctionNameValidityUnknown,
         is_generator ? FunctionKind::kGeneratorFunction
                      : FunctionKind::kNormalFunction,
-        function_token_position, function_type, FunctionLiteral::kNormalArity,
-        language_mode(), CHECK_OK);
+        function_token_position, function_type, language_mode(), CHECK_OK);
   } else if (peek() == Token::SUPER) {
     const bool is_new = false;
     result = ParseSuperExpression(is_new, classifier, CHECK_OK);
@@ -2637,7 +2628,7 @@
       Consume(Token::LBRACK);
       int pos = position();
       ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
-      index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+      Traits::RewriteNonPattern(classifier, CHECK_OK);
       left = factory()->NewProperty(this_expr, index, pos);
       if (fni_ != NULL) {
         this->PushPropertyName(fni_, index);
@@ -2673,7 +2664,7 @@
 
   ExpressionT right =
       this->ParseAssignmentExpression(true, classifier, CHECK_OK);
-  right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
+  Traits::RewriteNonPattern(classifier, CHECK_OK);
   this->CheckAssigningFunctionLiteralToProperty(left, right);
   function_state_->AddProperty();
   if (fni_ != NULL) {
@@ -2796,13 +2787,26 @@
   return this->EmptyExpression();
 }
 
+template <class Traits>
+void ParserBase<Traits>::ExpectMetaProperty(Vector<const char> property_name,
+                                            const char* full_name, int pos,
+                                            bool* ok) {
+  Consume(Token::PERIOD);
+  ExpectContextualKeyword(property_name, ok);
+  if (!*ok) return;
+  if (scanner()->literal_contains_escapes()) {
+    Traits::ReportMessageAt(
+        Scanner::Location(pos, scanner()->location().end_pos),
+        MessageTemplate::kInvalidEscapedMetaProperty, full_name);
+    *ok = false;
+  }
+}
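
[Editor's note] ExpectMetaProperty centralizes the two checks a meta-property needs: the literal after '.' must match the expected contextual keyword, and it must not contain escape sequences (so a spelling like new.t\u0061rget is rejected). A hedged standalone sketch of those two checks; the struct and function names are illustrative, not V8's:

    #include <iostream>
    #include <string>

    struct Literal {
      std::string text;
      bool contains_escapes;  // true if the source spelled it with \uXXXX
    };

    // Returns true if 'lit' is a valid spelling of the meta-property name.
    bool CheckMetaProperty(const Literal& lit, const std::string& expected,
                           const char* full_name, std::string* error) {
      if (lit.text != expected) {
        *error = "unexpected identifier after '.'";
        return false;
      }
      if (lit.contains_escapes) {
        // Mirrors kInvalidEscapedMetaProperty: the keyword must be written
        // literally, with no Unicode escapes.
        *error = std::string("'") + full_name + "' must not contain escapes";
        return false;
      }
      return true;
    }

    int main() {
      std::string err;
      Literal ok{"target", false};
      Literal escaped{"target", true};  // e.g. spelled t\u0061rget
      std::cout << CheckMetaProperty(ok, "target", "new.target", &err) << "\n";
      std::cout << CheckMetaProperty(escaped, "target", "new.target", &err)
                << "\n" << err << "\n";
    }
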
 
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseNewTargetExpression(bool* ok) {
   int pos = position();
-  Consume(Token::PERIOD);
-  ExpectContextualKeyword(CStrVector("target"), CHECK_OK);
+  ExpectMetaProperty(CStrVector("target"), "new.target", pos, CHECK_OK);
 
   if (!scope_->ReceiverScope()->is_function_scope()) {
     ReportMessageAt(scanner()->location(),
@@ -2824,13 +2828,14 @@
   while (true) {
     switch (peek()) {
       case Token::LBRACK: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
 
         Consume(Token::LBRACK);
         int pos = position();
         ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
-        index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         expression = factory()->NewProperty(expression, index, pos);
         if (fni_ != NULL) {
           this->PushPropertyName(fni_, index);
@@ -2839,6 +2844,7 @@
         break;
       }
       case Token::PERIOD: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
 
@@ -2854,6 +2860,7 @@
       }
       case Token::TEMPLATE_SPAN:
       case Token::TEMPLATE_TAIL: {
+        Traits::RewriteNonPattern(classifier, CHECK_OK);
         BindingPatternUnexpectedToken(classifier);
         ArrowFormalParametersUnexpectedToken(classifier);
         int pos;
@@ -2908,14 +2915,19 @@
 
   ExpressionT initializer = Traits::EmptyExpression();
   if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
-    ExpressionClassifier init_classifier;
+    ExpressionClassifier init_classifier(this);
     initializer = ParseAssignmentExpression(true, &init_classifier, ok);
     if (!*ok) return;
-    initializer = Traits::RewriteNonPattern(initializer, &init_classifier, ok);
+    Traits::RewriteNonPattern(&init_classifier, ok);
     ValidateFormalParameterInitializer(&init_classifier, ok);
     if (!*ok) return;
     parameters->is_simple = false;
+    init_classifier.Discard();
     classifier->RecordNonSimpleParameter();
+
+    if (allow_harmony_function_name()) {
+      Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
+    }
   }
 
   Traits::AddFormalParameter(parameters, pattern, initializer,
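
[Editor's note] The new SetFunctionNameFromIdentifierRef call gives an anonymous function initializer the parameter's name, so in `function f(g = function () {}) {}` the inner function is named "g". A minimal sketch of that inference rule under --harmony-function-name; the node types here are toys, only the shape of the check follows the source:

    #include <iostream>
    #include <string>

    struct FunctionLit {
      std::string name;  // empty means anonymous
      bool is_anonymous() const { return name.empty(); }
    };

    struct Identifier {
      std::string name;
    };

    // If 'value' is an anonymous function and 'pattern' is a plain
    // identifier, copy the identifier's name onto the function.
    void SetFunctionNameFromIdentifierRef(FunctionLit* value,
                                          const Identifier* pattern) {
      if (value == nullptr || pattern == nullptr) return;
      if (!value->is_anonymous()) return;  // explicit names win
      value->name = pattern->name;
    }

    int main() {
      FunctionLit anon{""};
      Identifier param{"g"};
      SetFunctionNameFromIdentifierRef(&anon, &param);
      std::cout << anon.name << "\n";  // g
    }
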
@@ -2972,33 +2984,29 @@
   }
 }
 
-
 template <class Traits>
-void ParserBase<Traits>::CheckArityRestrictions(
-    int param_count, FunctionLiteral::ArityRestriction arity_restriction,
-    bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok) {
-  switch (arity_restriction) {
-    case FunctionLiteral::kGetterArity:
-      if (param_count != 0) {
-        ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                        MessageTemplate::kBadGetterArity);
-        *ok = false;
-      }
-      break;
-    case FunctionLiteral::kSetterArity:
-      if (param_count != 1) {
-        ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                        MessageTemplate::kBadSetterArity);
-        *ok = false;
-      }
-      if (has_rest) {
-        ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
-                        MessageTemplate::kBadSetterRestParameter);
-        *ok = false;
-      }
-      break;
-    default:
-      break;
+void ParserBase<Traits>::CheckArityRestrictions(int param_count,
+                                                FunctionKind function_kind,
+                                                bool has_rest,
+                                                int formals_start_pos,
+                                                int formals_end_pos, bool* ok) {
+  if (IsGetterFunction(function_kind)) {
+    if (param_count != 0) {
+      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+                      MessageTemplate::kBadGetterArity);
+      *ok = false;
+    }
+  } else if (IsSetterFunction(function_kind)) {
+    if (param_count != 1) {
+      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+                      MessageTemplate::kBadSetterArity);
+      *ok = false;
+    }
+    if (has_rest) {
+      ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+                      MessageTemplate::kBadSetterRestParameter);
+      *ok = false;
+    }
   }
 }
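
[Editor's note] With ArityRestriction gone, the getter/setter rules above key off FunctionKind directly: a getter takes exactly zero parameters, a setter exactly one and never a rest parameter. A compact standalone sketch of the same table, with toy kinds and a string error channel instead of MessageTemplate:

    #include <iostream>
    #include <string>

    enum class FunctionKind { kNormal, kGetter, kSetter };

    // Returns an empty string when the parameter list is legal for 'kind'.
    std::string CheckArity(FunctionKind kind, int param_count, bool has_rest) {
      if (kind == FunctionKind::kGetter) {
        if (param_count != 0) return "getter must have no formal parameters";
      } else if (kind == FunctionKind::kSetter) {
        if (param_count != 1) return "setter must have exactly one parameter";
        if (has_rest) return "setter must not have a rest parameter";
      }
      return "";
    }

    int main() {
      std::cout << CheckArity(FunctionKind::kGetter, 0, false).empty() << "\n";
      std::cout << CheckArity(FunctionKind::kSetter, 2, false) << "\n";
      std::cout << CheckArity(FunctionKind::kSetter, 1, true) << "\n";
      std::cout << CheckArity(FunctionKind::kNormal, 5, true).empty() << "\n";
    }
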
 
@@ -3082,10 +3090,10 @@
       // Single-expression body
       int pos = position();
       parenthesized_function_ = false;
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       ExpressionT expression =
           ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
-      expression = Traits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+      Traits::RewriteNonPattern(&classifier, CHECK_OK);
       body = this->NewStatementList(1, zone());
       this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
       body->Add(factory()->NewReturnStatement(expression, pos), zone());
@@ -3191,7 +3199,7 @@
 
     int expr_pos = peek_position();
     ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
-    expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+    Traits::RewriteNonPattern(classifier, CHECK_OK);
     Traits::AddTemplateExpression(&ts, expression);
 
     if (peek() != Token::RBRACE) {
@@ -3245,7 +3253,7 @@
 ParserBase<Traits>::CheckAndRewriteReferenceExpression(
     ExpressionT expression, int beg_pos, int end_pos,
     MessageTemplate::Template message, ParseErrorType type, bool* ok) {
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   ExpressionT result = ClassifyAndRewriteReferenceExpression(
       &classifier, expression, beg_pos, end_pos, message, type);
   ValidateExpression(&classifier, ok);
@@ -3299,21 +3307,11 @@
 void ParserBase<Traits>::CheckDestructuringElement(
     ExpressionT expression, ExpressionClassifier* classifier, int begin,
     int end) {
-  static const MessageTemplate::Template message =
-      MessageTemplate::kInvalidDestructuringTarget;
-  const Scanner::Location location(begin, end);
-  if (expression->IsArrayLiteral() || expression->IsObjectLiteral() ||
-      expression->IsAssignment()) {
-    if (expression->is_parenthesized()) {
-      classifier->RecordPatternError(location, message);
-    }
-    return;
-  }
-
-  if (expression->IsProperty()) {
-    classifier->RecordBindingPatternError(location, message);
-  } else if (!this->IsAssignableIdentifier(expression)) {
-    classifier->RecordPatternError(location, message);
+  if (!IsValidPattern(expression) && !expression->IsAssignment() &&
+      !IsValidReferenceExpression(expression)) {
+    classifier->RecordAssignmentPatternError(
+        Scanner::Location(begin, end),
+        MessageTemplate::kInvalidDestructuringTarget);
   }
 }
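
[Editor's note] The rewritten CheckDestructuringElement reduces to a single predicate: an element is acceptable if it is itself a pattern (array/object literal), an assignment (a default), or a valid assignment target. A sketch of that predicate over a toy expression tag; the enum is hypothetical, not V8's node hierarchy:

    #include <iostream>

    enum class ExprKind {
      kIdentifier, kProperty, kArrayLiteral, kObjectLiteral,
      kAssignment, kCall, kNumberLiteral
    };

    bool IsValidPattern(ExprKind k) {
      return k == ExprKind::kArrayLiteral || k == ExprKind::kObjectLiteral;
    }

    bool IsValidReferenceExpression(ExprKind k) {
      return k == ExprKind::kIdentifier || k == ExprKind::kProperty;
    }

    // True if the expression may appear as a destructuring element; the
    // parser records kInvalidDestructuringTarget otherwise.
    bool IsValidDestructuringElement(ExprKind k) {
      return IsValidPattern(k) || k == ExprKind::kAssignment ||
             IsValidReferenceExpression(k);
    }

    int main() {
      std::cout << IsValidDestructuringElement(ExprKind::kIdentifier) << "\n";
      std::cout << IsValidDestructuringElement(ExprKind::kAssignment) << "\n";
      std::cout << IsValidDestructuringElement(ExprKind::kCall) << "\n";
      std::cout << IsValidDestructuringElement(ExprKind::kNumberLiteral) << "\n";
    }
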
 
@@ -3375,6 +3373,8 @@
     return;
   }
 }
+
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index b1b8c13..968e8ed 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -6,6 +6,7 @@
 
 #include "src/api.h"
 #include "src/ast/ast.h"
+#include "src/ast/ast-expression-rewriter.h"
 #include "src/ast/ast-expression-visitor.h"
 #include "src/ast/ast-literal-reindexer.h"
 #include "src/ast/scopeinfo.h"
@@ -22,6 +23,7 @@
 #include "src/parsing/scanner-character-streams.h"
 #include "src/runtime/runtime.h"
 #include "src/string-stream.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -178,15 +180,14 @@
   }
 }
 
-
-FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
+FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
+                                            bool call_super, Scope* scope,
                                             int pos, int end_pos,
                                             LanguageMode language_mode) {
   int materialized_literal_count = -1;
   int expected_property_count = -1;
   int parameter_count = 0;
-  const AstRawString* name = ast_value_factory()->empty_string();
-
+  if (name == nullptr) name = ast_value_factory()->empty_string();
 
   FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
                                  : FunctionKind::kDefaultBaseConstructor;
@@ -642,10 +643,16 @@
 }
 
 
-Expression* ParserTraits::DefaultConstructor(bool call_super, Scope* scope,
-                                             int pos, int end_pos,
-                                             LanguageMode mode) {
-  return parser_->DefaultConstructor(call_super, scope, pos, end_pos, mode);
+Expression* ParserTraits::FunctionSentExpression(Scope* scope,
+                                                 AstNodeFactory* factory,
+                                                 int pos) {
+  // We desugar function.sent into %GeneratorGetInput(generator).
+  Zone* zone = parser_->zone();
+  ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
+  VariableProxy* generator = factory->NewVariableProxy(
+      parser_->function_state_->generator_object_variable());
+  args->Add(generator, zone);
+  return factory->NewCallRuntime(Runtime::kGeneratorGetInput, args, pos);
 }
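
[Editor's note] FunctionSentExpression desugars function.sent into a runtime call that reads the value the generator was last resumed with, roughly %GeneratorGetInput(generator). A toy sketch of building such a call node; Runtime::kGeneratorGetInput is the intrinsic named in the source above, everything else here is an illustrative stand-in:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    // Minimal stand-ins for AST nodes.
    struct Expr {
      virtual ~Expr() = default;
      virtual std::string Print() const = 0;
    };

    struct VariableProxy : Expr {
      std::string name;
      explicit VariableProxy(std::string n) : name(std::move(n)) {}
      std::string Print() const override { return name; }
    };

    struct CallRuntime : Expr {
      std::string intrinsic;
      std::vector<std::unique_ptr<Expr>> args;
      std::string Print() const override {
        std::string s = "%" + intrinsic + "(";
        for (size_t i = 0; i < args.size(); i++) {
          if (i) s += ", ";
          s += args[i]->Print();
        }
        return s + ")";
      }
    };

    // Desugar function.sent: wrap the generator object in a runtime call.
    std::unique_ptr<Expr> FunctionSentExpression(const std::string& gen_var) {
      auto call = std::make_unique<CallRuntime>();
      call->intrinsic = "GeneratorGetInput";
      call->args.push_back(std::make_unique<VariableProxy>(gen_var));
      return call;
    }

    int main() {
      std::cout << FunctionSentExpression(".generator_object")->Print() << "\n";
      // %GeneratorGetInput(.generator_object)
    }
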
 
 
@@ -721,11 +728,10 @@
     const AstRawString* name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
     int function_token_position, FunctionLiteral::FunctionType type,
-    FunctionLiteral::ArityRestriction arity_restriction,
     LanguageMode language_mode, bool* ok) {
   return parser_->ParseFunctionLiteral(
       name, function_name_location, function_name_validity, kind,
-      function_token_position, type, arity_restriction, language_mode, ok);
+      function_token_position, type, language_mode, ok);
 }
 
 
@@ -767,6 +773,7 @@
   set_allow_legacy_const(FLAG_legacy_const);
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
   set_allow_harmony_function_name(FLAG_harmony_function_name);
+  set_allow_harmony_function_sent(FLAG_harmony_function_sent);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -789,6 +796,7 @@
   DCHECK(parsing_on_main_thread_);
 
   HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
+  TRACE_EVENT0("v8", "V8.Parse");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -935,13 +943,9 @@
 
     if (ok) {
       ParserTraits::RewriteDestructuringAssignments();
-      result = factory()->NewFunctionLiteral(
-          ast_value_factory()->empty_string(), scope_, body,
-          function_state.materialized_literal_count(),
-          function_state.expected_property_count(), 0,
-          FunctionLiteral::kNoDuplicateParameters,
-          FunctionLiteral::kGlobalOrEval, FunctionLiteral::kShouldLazyCompile,
-          FunctionKind::kNormalFunction, 0);
+      result = factory()->NewScriptOrEvalFunctionLiteral(
+          scope_, body, function_state.materialized_literal_count(),
+          function_state.expected_property_count());
     }
   }
 
@@ -957,6 +961,7 @@
   // called in the main thread.
   DCHECK(parsing_on_main_thread_);
   HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
+  TRACE_EVENT0("v8", "V8.ParseLazy");
   Handle<String> source(String::cast(info->script()->source()));
   isolate->counters()->total_parse_size()->Increment(source->length());
   base::ElapsedTimer timer;
@@ -990,6 +995,18 @@
   return result;
 }
 
+static FunctionLiteral::FunctionType ComputeFunctionType(
+    Handle<SharedFunctionInfo> shared_info) {
+  if (shared_info->is_declaration()) {
+    return FunctionLiteral::kDeclaration;
+  } else if (shared_info->is_named_expression()) {
+    return FunctionLiteral::kNamedExpression;
+  } else if (IsConciseMethod(shared_info->kind()) ||
+             IsAccessorFunction(shared_info->kind())) {
+    return FunctionLiteral::kAccessorOrMethod;
+  }
+  return FunctionLiteral::kAnonymousExpression;
+}
 
 FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
                                    Utf16CharacterStream* source) {
@@ -1028,11 +1045,7 @@
            is_strict(info->language_mode()));
     DCHECK(info->language_mode() == shared_info->language_mode());
     FunctionLiteral::FunctionType function_type =
-        shared_info->is_expression()
-            ? (shared_info->is_anonymous()
-                   ? FunctionLiteral::kAnonymousExpression
-                   : FunctionLiteral::kNamedExpression)
-            : FunctionLiteral::kDeclaration;
+        ComputeFunctionType(shared_info);
     bool ok = true;
 
     if (shared_info->is_arrow()) {
@@ -1050,7 +1063,7 @@
       SetLanguageMode(scope, shared_info->language_mode());
 
       scope->set_start_position(shared_info->start_position());
-      ExpressionClassifier formals_classifier;
+      ExpressionClassifier formals_classifier(this);
       ParserFormalParameters formals(scope);
       Checkpoint checkpoint(this);
       {
@@ -1096,15 +1109,15 @@
         }
       }
     } else if (shared_info->is_default_constructor()) {
-      result = DefaultConstructor(IsSubclassConstructor(shared_info->kind()),
-                                  scope, shared_info->start_position(),
-                                  shared_info->end_position(),
-                                  shared_info->language_mode());
+      result = DefaultConstructor(
+          raw_name, IsSubclassConstructor(shared_info->kind()), scope,
+          shared_info->start_position(), shared_info->end_position(),
+          shared_info->language_mode());
     } else {
-      result = ParseFunctionLiteral(
-          raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck,
-          shared_info->kind(), RelocInfo::kNoPosition, function_type,
-          FunctionLiteral::kNormalArity, shared_info->language_mode(), &ok);
+      result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
+                                    kSkipFunctionNameCheck, shared_info->kind(),
+                                    RelocInfo::kNoPosition, function_type,
+                                    shared_info->language_mode(), &ok);
     }
     // Make sure the results agree.
     DCHECK(ok == (result != NULL));
@@ -1260,20 +1273,11 @@
   //    Statement
   //    Declaration
 
-  if (peek() != Token::CLASS) {
-    // No more classes follow; reset the start position for the consecutive
-    // class declaration group.
-    scope_->set_class_declaration_group_start(-1);
-  }
-
   switch (peek()) {
     case Token::FUNCTION:
       return ParseFunctionDeclaration(NULL, ok);
     case Token::CLASS:
-      if (scope_->class_declaration_group_start() < 0) {
-        scope_->set_class_declaration_group_start(
-            scanner()->peek_location().beg_pos);
-      }
+      Consume(Token::CLASS);
       return ParseClassDeclaration(NULL, ok);
     case Token::CONST:
       if (allow_const()) {
@@ -1345,7 +1349,6 @@
     }
   }
 
-  scope_->module()->Freeze();
   return NULL;
 }
 
@@ -1558,24 +1561,53 @@
   Expect(Token::DEFAULT, CHECK_OK);
   Scanner::Location default_loc = scanner()->location();
 
+  const AstRawString* default_string = ast_value_factory()->default_string();
   ZoneList<const AstRawString*> names(1, zone());
-  Statement* result = NULL;
+  Statement* result = nullptr;
+  Expression* default_export = nullptr;
   switch (peek()) {
-    case Token::FUNCTION:
-      // TODO(ES6): Support parsing anonymous function declarations here.
-      result = ParseFunctionDeclaration(&names, CHECK_OK);
+    case Token::FUNCTION: {
+      Consume(Token::FUNCTION);
+      int pos = position();
+      bool is_generator = Check(Token::MUL);
+      if (peek() == Token::LPAREN) {
+        // FunctionDeclaration[+Default] ::
+        //   'function' '(' FormalParameters ')' '{' FunctionBody '}'
+        //
+        // GeneratorDeclaration[+Default] ::
+        //   'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
+        default_export = ParseFunctionLiteral(
+            default_string, Scanner::Location::invalid(),
+            kSkipFunctionNameCheck,
+            is_generator ? FunctionKind::kGeneratorFunction
+                         : FunctionKind::kNormalFunction,
+            pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
+        result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+      } else {
+        result = ParseFunctionDeclaration(pos, is_generator, &names, CHECK_OK);
+      }
       break;
+    }
 
     case Token::CLASS:
-      // TODO(ES6): Support parsing anonymous class declarations here.
-      result = ParseClassDeclaration(&names, CHECK_OK);
+      Consume(Token::CLASS);
+      if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
+        // ClassDeclaration[+Default] ::
+        //   'class' ('extends' LeftHandSideExpression)? '{' ClassBody '}'
+        default_export =
+            ParseClassLiteral(default_string, Scanner::Location::invalid(),
+                              false, position(), CHECK_OK);
+        result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+      } else {
+        result = ParseClassDeclaration(&names, CHECK_OK);
+      }
       break;
 
     default: {
       int pos = peek_position();
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       Expression* expr = ParseAssignmentExpression(true, &classifier, CHECK_OK);
-      expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
+      RewriteNonPattern(&classifier, CHECK_OK);
 
       ExpectSemicolon(CHECK_OK);
       result = factory()->NewExpressionStatement(expr, pos);
@@ -1583,19 +1615,18 @@
     }
   }
 
-  const AstRawString* default_string = ast_value_factory()->default_string();
-
   DCHECK_LE(names.length(), 1);
   if (names.length() == 1) {
     scope_->module()->AddLocalExport(default_string, names.first(), zone(), ok);
     if (!*ok) {
       ParserTraits::ReportMessageAt(
           default_loc, MessageTemplate::kDuplicateExport, default_string);
-      return NULL;
+      return nullptr;
     }
   } else {
     // TODO(ES6): Assign result to a const binding with the name "*default*"
     // and add an export entry with "*default*" as the local name.
+    USE(default_export);
   }
 
   return result;
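
[Editor's note] The new export-default handling above makes a one-token decision: after `export default function` (optionally `function*`), a '(' means an anonymous FunctionDeclaration[+Default] parsed as a literal carrying the name "default", while an identifier means an ordinary named declaration. A hedged sketch of just that branch, over a toy token stream (the enum and names are illustrative):

    #include <iostream>
    #include <string>
    #include <vector>

    enum class Token { kFunction, kMul, kLParen, kIdentifier, kEOF };

    // After 'export default function' ('*'?), decide between an anonymous
    // declaration (next token '(') and a named one (identifier follows).
    std::string ClassifyDefaultExport(const std::vector<Token>& lookahead) {
      size_t i = 0;
      if (i < lookahead.size() && lookahead[i] == Token::kMul) i++;  // generator?
      if (i < lookahead.size() && lookahead[i] == Token::kLParen) {
        return "anonymous function literal exported as \"default\"";
      }
      return "named function declaration; name is also the local export";
    }

    int main() {
      // export default function () {}
      std::cout << ClassifyDefaultExport({Token::kLParen}) << "\n";
      // export default function* gen() {}
      std::cout << ClassifyDefaultExport({Token::kMul, Token::kIdentifier})
                << "\n";
    }
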
@@ -1686,6 +1717,7 @@
       break;
 
     case Token::CLASS:
+      Consume(Token::CLASS);
       result = ParseClassDeclaration(&names, CHECK_OK);
       break;
 
@@ -1921,42 +1953,44 @@
     if (var == NULL) {
       // Declare the name.
       Variable::Kind kind = Variable::NORMAL;
-      int declaration_group_start = -1;
       if (is_function_declaration) {
         kind = Variable::FUNCTION;
-      } else if (declaration->IsVariableDeclaration() &&
-                 declaration->AsVariableDeclaration()->is_class_declaration()) {
-        kind = Variable::CLASS;
-        declaration_group_start =
-            declaration->AsVariableDeclaration()->declaration_group_start();
       }
       var = declaration_scope->DeclareLocal(
-          name, mode, declaration->initialization(), kind, kNotAssigned,
-          declaration_group_start);
-    } else if (((IsLexicalVariableMode(mode) ||
-                 IsLexicalVariableMode(var->mode())) &&
-                // Allow duplicate function decls for web compat, see bug 4693.
-                (is_strict(language_mode()) || !is_function_declaration ||
-                 !var->is_function())) ||
-               ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
-                !declaration_scope->is_script_scope())) {
-      // The name was declared in this scope before; check for conflicting
-      // re-declarations. We have a conflict if either of the declarations is
-      // not a var (in script scope, we also have to ignore legacy const for
-      // compatibility). There is similar code in runtime.cc in the Declare
-      // functions. The function CheckConflictingVarDeclarations checks for
-      // var and let bindings from different scopes whereas this is a check for
-      // conflicting declarations within the same scope. This check also covers
-      // the special case
-      //
-      // function () { let x; { var x; } }
-      //
-      // because the var declaration is hoisted to the function scope where 'x'
-      // is already bound.
-      DCHECK(IsDeclaredVariableMode(var->mode()));
-      if (is_strict(language_mode()) ||
-          (allow_harmony_sloppy() && mode != CONST_LEGACY &&
-           var->mode() != CONST_LEGACY)) {
+          name, mode, declaration->initialization(), kind, kNotAssigned);
+    } else if ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
+               !declaration_scope->is_script_scope()) {
+      // Duplicate legacy const definitions throw at runtime.
+      DCHECK(is_sloppy(language_mode()));
+      Expression* expression = NewThrowSyntaxError(
+          MessageTemplate::kVarRedeclaration, name, declaration->position());
+      declaration_scope->SetIllegalRedeclaration(expression);
+    } else if ((IsLexicalVariableMode(mode) ||
+                IsLexicalVariableMode(var->mode())) &&
+               // Lexical bindings may appear for some parameters in sloppy
+               // mode even with --harmony-sloppy off.
+               (is_strict(language_mode()) || allow_harmony_sloppy())) {
+      // Allow duplicate function decls for web compat, see bug 4693.
+      if (is_sloppy(language_mode()) && is_function_declaration &&
+          var->is_function()) {
+        DCHECK(IsLexicalVariableMode(mode) &&
+               IsLexicalVariableMode(var->mode()));
+        ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
+      } else {
+        // The name was declared in this scope before; check for conflicting
+        // re-declarations. We have a conflict if either of the declarations
+        // is not a var (in script scope, we also have to ignore legacy const
+        // for compatibility). There is similar code in runtime.cc in the
+        // Declare functions. The function CheckConflictingVarDeclarations
+        // checks for var and let bindings from different scopes whereas this
+        // is a check for conflicting declarations within the same scope. This
+        // check also covers the special case
+        //
+        // function () { let x; { var x; } }
+        //
+        // because the var declaration is hoisted to the function scope where
+        // 'x' is already bound.
+        DCHECK(IsDeclaredVariableMode(var->mode()));
         // In harmony we treat re-declarations as early errors. See
         // ES5 16 for a definition of early errors.
         if (declaration_kind == DeclarationDescriptor::NORMAL) {
@@ -1967,9 +2001,6 @@
         *ok = false;
         return nullptr;
       }
-      Expression* expression = NewThrowSyntaxError(
-          MessageTemplate::kVarRedeclaration, name, declaration->position());
-      declaration_scope->SetIllegalRedeclaration(expression);
     } else if (mode == VAR) {
       var->set_maybe_assigned();
     }
@@ -2093,14 +2124,22 @@
 
 Statement* Parser::ParseFunctionDeclaration(
     ZoneList<const AstRawString*>* names, bool* ok) {
-  // FunctionDeclaration ::
-  //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
-  // GeneratorDeclaration ::
-  //   'function' '*' Identifier '(' FormalParameterListopt ')'
-  //      '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int pos = position();
   bool is_generator = Check(Token::MUL);
+  return ParseFunctionDeclaration(pos, is_generator, names, ok);
+}
+
+
+Statement* Parser::ParseFunctionDeclaration(
+    int pos, bool is_generator, ZoneList<const AstRawString*>* names,
+    bool* ok) {
+  // FunctionDeclaration ::
+  //   'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+  // GeneratorDeclaration ::
+  //   'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+  //
+  // 'function' and '*' (if present) have been consumed by the caller.
   bool is_strict_reserved = false;
   const AstRawString* name = ParseIdentifierOrStrictReservedWord(
       &is_strict_reserved, CHECK_OK);
@@ -2113,8 +2152,7 @@
                          : kFunctionNameValidityUnknown,
       is_generator ? FunctionKind::kGeneratorFunction
                    : FunctionKind::kNormalFunction,
-      pos, FunctionLiteral::kDeclaration, FunctionLiteral::kNormalArity,
-      language_mode(), CHECK_OK);
+      pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
 
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
@@ -2151,6 +2189,8 @@
   // ClassDeclaration ::
   //   'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
   //
+  // 'class' has already been consumed by the caller.
+  //
   // A ClassDeclaration
   //
   //   class C { ... }
@@ -2161,7 +2201,6 @@
   //
   // so rewrite it as such.
 
-  Expect(Token::CLASS, CHECK_OK);
   if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
     ReportMessage(MessageTemplate::kSloppyLexical);
     *ok = false;
@@ -2177,30 +2216,10 @@
 
   VariableMode mode = is_strong(language_mode()) ? CONST : LET;
   VariableProxy* proxy = NewUnresolved(name, mode);
-  const bool is_class_declaration = true;
-  Declaration* declaration = factory()->NewVariableDeclaration(
-      proxy, mode, scope_, pos, is_class_declaration,
-      scope_->class_declaration_group_start());
-  Variable* outer_class_variable =
-      Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
+  Declaration* declaration =
+      factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+  Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
   proxy->var()->set_initializer_position(position());
-  // This is needed because a class ("class Name { }") creates two bindings (one
-  // in the outer scope, and one in the class scope). The method is a function
-  // scope inside the inner scope (class scope). The consecutive class
-  // declarations are in the outer scope.
-  if (value->class_variable_proxy() && value->class_variable_proxy()->var() &&
-      outer_class_variable->is_class()) {
-    // In some cases, the outer variable is not detected as a class variable;
-    // this happens e.g., for lazy methods. They are excluded from strong mode
-    // checks for now. TODO(marja, rossberg): re-create variables with the
-    // correct Kind and remove this hack.
-    value->class_variable_proxy()
-        ->var()
-        ->AsClassVariable()
-        ->set_declaration_group_start(
-            outer_class_variable->AsClassVariable()->declaration_group_start());
-  }
-
   Assignment* assignment =
       factory()->NewAssignment(Token::INIT, proxy, value, pos);
   Statement* assignment_statement =
@@ -2281,17 +2300,16 @@
   // is inside an initializer block, it is ignored.
 
   DeclarationParsingResult parsing_result;
-  ParseVariableDeclarations(var_context, &parsing_result, CHECK_OK);
+  Block* result =
+      ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-
-  Block* result = parsing_result.BuildInitializationBlock(names, CHECK_OK);
   return result;
 }
 
-
-void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                       DeclarationParsingResult* parsing_result,
-                                       bool* ok) {
+Block* Parser::ParseVariableDeclarations(
+    VariableDeclarationContext var_context,
+    DeclarationParsingResult* parsing_result,
+    ZoneList<const AstRawString*>* names, bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
   //
@@ -2311,17 +2329,19 @@
   parsing_result->descriptor.declaration_pos = peek_position();
   parsing_result->descriptor.initialization_pos = peek_position();
   parsing_result->descriptor.mode = VAR;
-  // True if the binding needs initialization. 'let' and 'const' declared
-  // bindings are created uninitialized by their declaration nodes and
-  // need initialization. 'var' declared bindings are always initialized
-  // immediately by their declaration nodes.
-  parsing_result->descriptor.needs_init = false;
+
+  Block* init_block = nullptr;
+  if (var_context != kForStatement) {
+    init_block = factory()->NewBlock(
+        NULL, 1, true, parsing_result->descriptor.declaration_pos);
+  }
+
   if (peek() == Token::VAR) {
     if (is_strong(language_mode())) {
       Scanner::Location location = scanner()->peek_location();
       ReportMessageAt(location, MessageTemplate::kStrongVar);
       *ok = false;
-      return;
+      return nullptr;
     }
     Consume(Token::VAR);
   } else if (peek() == Token::CONST && allow_const()) {
@@ -2334,12 +2354,10 @@
       DCHECK(var_context != kStatement);
       parsing_result->descriptor.mode = CONST;
     }
-    parsing_result->descriptor.needs_init = true;
   } else if (peek() == Token::LET && allow_let()) {
     Consume(Token::LET);
     DCHECK(var_context != kStatement);
     parsing_result->descriptor.mode = LET;
-    parsing_result->descriptor.needs_init = true;
   } else {
     UNREACHABLE();  // by current callers
   }
@@ -2350,7 +2368,6 @@
 
   bool first_declaration = true;
   int bindings_start = peek_position();
-  bool is_for_iteration_variable;
   do {
     FuncNameInferrer::State fni_state(fni_);
 
@@ -2360,27 +2377,20 @@
     Expression* pattern;
     int decl_pos = peek_position();
     {
-      ExpressionClassifier pattern_classifier;
+      ExpressionClassifier pattern_classifier(this);
       Token::Value next = peek();
-      pattern = ParsePrimaryExpression(&pattern_classifier, ok);
-      if (!*ok) return;
-      ValidateBindingPattern(&pattern_classifier, ok);
-      if (!*ok) return;
+      pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+      ValidateBindingPattern(&pattern_classifier, CHECK_OK);
       if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
-        ValidateLetPattern(&pattern_classifier, ok);
-        if (!*ok) return;
+        ValidateLetPattern(&pattern_classifier, CHECK_OK);
       }
       if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
         ReportUnexpectedToken(next);
         *ok = false;
-        return;
+        return nullptr;
       }
     }
 
-    bool is_pattern =
-        (pattern->IsObjectLiteral() || pattern->IsArrayLiteral()) &&
-        !pattern->is_parenthesized();
-
     Scanner::Location variable_loc = scanner()->location();
     const AstRawString* single_name =
         pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
@@ -2389,25 +2399,13 @@
       if (fni_ != NULL) fni_->PushVariableName(single_name);
     }
 
-    is_for_iteration_variable =
-        var_context == kForStatement &&
-        (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
-    if (is_for_iteration_variable &&
-        (parsing_result->descriptor.mode == CONST ||
-         parsing_result->descriptor.mode == CONST_LEGACY)) {
-      parsing_result->descriptor.needs_init = false;
-    }
-
     Expression* value = NULL;
-    // Harmony consts have non-optional initializers.
     int initializer_position = RelocInfo::kNoPosition;
     if (Check(Token::ASSIGN)) {
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       value = ParseAssignmentExpression(var_context != kForStatement,
-                                        &classifier, ok);
-      if (!*ok) return;
-      value = ParserTraits::RewriteNonPattern(value, &classifier, ok);
-      if (!*ok) return;
+                                        &classifier, CHECK_OK);
+      RewriteNonPattern(&classifier, CHECK_OK);
       variable_loc.end_pos = scanner()->location().end_pos;
 
       if (!parsing_result->first_initializer_loc.IsValid()) {
@@ -2424,48 +2422,60 @@
         }
       }
 
-      if (allow_harmony_function_name() && single_name) {
-        if (value->IsFunctionLiteral()) {
-          auto function_literal = value->AsFunctionLiteral();
-          if (function_literal->is_anonymous()) {
-            function_literal->set_raw_name(single_name);
-          }
-        } else if (value->IsClassLiteral()) {
-          auto class_literal = value->AsClassLiteral();
-          if (class_literal->raw_name() == nullptr) {
-            class_literal->set_raw_name(single_name);
-          }
-        }
+      if (allow_harmony_function_name()) {
+        ParserTraits::SetFunctionNameFromIdentifierRef(value, pattern);
       }
 
       // End position of the initializer is after the assignment expression.
       initializer_position = scanner()->location().end_pos;
     } else {
-      if ((parsing_result->descriptor.mode == CONST || is_pattern) &&
-          !is_for_iteration_variable) {
-        ParserTraits::ReportMessageAt(
-            Scanner::Location(decl_pos, scanner()->location().end_pos),
-            MessageTemplate::kDeclarationMissingInitializer,
-            is_pattern ? "destructuring" : "const");
-        *ok = false;
-        return;
+      // Initializers may be either required or implied unless this is a
+      // for-in/of iteration variable.
+      if (var_context != kForStatement || !PeekInOrOf()) {
+        // ES6 'const' and binding patterns require initializers.
+        if (parsing_result->descriptor.mode == CONST ||
+            !pattern->IsVariableProxy()) {
+          ParserTraits::ReportMessageAt(
+              Scanner::Location(decl_pos, scanner()->location().end_pos),
+              MessageTemplate::kDeclarationMissingInitializer,
+              !pattern->IsVariableProxy() ? "destructuring" : "const");
+          *ok = false;
+          return nullptr;
+        }
+
+        // 'let x' and (legacy) 'const x' initialize 'x' to undefined.
+        if (parsing_result->descriptor.mode == LET ||
+            parsing_result->descriptor.mode == CONST_LEGACY) {
+          value = GetLiteralUndefined(position());
+        }
       }
+
       // End position of the initializer is after the variable.
       initializer_position = position();
     }
 
-    // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
-    if (value == NULL && parsing_result->descriptor.needs_init) {
-      value = GetLiteralUndefined(position());
+    DeclarationParsingResult::Declaration decl(pattern, initializer_position,
+                                               value);
+    if (var_context == kForStatement) {
+      // Save the declaration for further handling in ParseForStatement.
+      parsing_result->declarations.Add(decl);
+    } else {
+      // Immediately declare the variable otherwise. This avoids O(N^2)
+      // behavior (where N is the number of variables in a single
+      // declaration) in the PatternRewriter having to do with removing
+      // and adding VariableProxies to the Scope (see bug 4699).
+      DCHECK_NOT_NULL(init_block);
+      PatternRewriter::DeclareAndInitializeVariables(
+          init_block, &parsing_result->descriptor, &decl, names, CHECK_OK);
     }
-
-    parsing_result->declarations.Add(DeclarationParsingResult::Declaration(
-        pattern, initializer_position, value));
     first_declaration = false;
   } while (peek() == Token::COMMA);
 
   parsing_result->bindings_loc =
       Scanner::Location(bindings_start, scanner()->location().end_pos);
+
+  DCHECK(*ok);
+  return init_block;
 }
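
[Editor's note] ParseVariableDeclarations now declares each binding into init_block as soon as it is parsed instead of batching the whole list, except in for-statement headers where ParseForStatement still needs the raw declarations; per the comment above, batching made the PatternRewriter repeatedly remove and re-add VariableProxies, which was quadratic in the number of bindings. A sketch of the control-flow split, with toy types (the O(N^2)-vs-O(N) claim is the source's, not measured here):

    #include <iostream>
    #include <string>
    #include <vector>

    struct Declaration {
      std::string name;
      std::string initializer;  // empty when absent
    };

    struct Block {
      std::vector<std::string> statements;
    };

    enum class VarContext { kStatement, kForStatement };

    void DeclareAndInitialize(Block* block, const Declaration& d) {
      block->statements.push_back(
          "declare " + d.name +
          (d.initializer.empty() ? "" : " = " + d.initializer));
    }

    // Returns the init block for ordinary statements, nullptr for
    // for-headers (whose declarations are handed back to the caller).
    Block* ParseVariableDeclarations(VarContext ctx,
                                     const std::vector<Declaration>& parsed,
                                     std::vector<Declaration>* deferred,
                                     Block* storage) {
      Block* init_block = (ctx != VarContext::kForStatement) ? storage : nullptr;
      for (const Declaration& d : parsed) {
        if (ctx == VarContext::kForStatement) {
          deferred->push_back(d);               // ParseForStatement finishes these
        } else {
          DeclareAndInitialize(init_block, d);  // declare immediately
        }
      }
      return init_block;
    }

    int main() {
      std::vector<Declaration> decls = {{"x", "1"}, {"y", ""}};
      std::vector<Declaration> deferred;
      Block storage;
      Block* b = ParseVariableDeclarations(VarContext::kStatement, decls,
                                           &deferred, &storage);
      for (const auto& s : b->statements) std::cout << s << "\n";
    }
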
 
 
@@ -2511,13 +2521,13 @@
           IsClassConstructor(function_state_->kind())) {
         bool is_this = peek() == Token::THIS;
         Expression* expr;
-        ExpressionClassifier classifier;
+        ExpressionClassifier classifier(this);
         if (is_this) {
           expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
         } else {
           expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
         }
-        expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
+        RewriteNonPattern(&classifier, CHECK_OK);
         switch (peek()) {
           case Token::SEMICOLON:
             Consume(Token::SEMICOLON);
@@ -2728,23 +2738,22 @@
 
     if (IsSubclassConstructor(function_state_->kind())) {
       // For subclass constructors we need to return this in case of undefined
-      // and throw an exception in case of a non object.
+      // and return a Smi (transformed into an exception in the ConstructStub)
+      // for a non object.
       //
       //   return expr;
       //
       // Is rewritten as:
       //
       //   return (temp = expr) === undefined ? this :
-      //       %_IsJSReceiver(temp) ? temp : throw new TypeError(...);
+      //       %_IsJSReceiver(temp) ? temp : 1;
+
+      // temp = expr
       Variable* temp = scope_->NewTemporary(
           ast_value_factory()->empty_string());
       Assignment* assign = factory()->NewAssignment(
           Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
 
-      Expression* throw_expression =
-          NewThrowTypeError(MessageTemplate::kDerivedConstructorReturn,
-                            ast_value_factory()->empty_string(), pos);
-
       // %_IsJSReceiver(temp)
       ZoneList<Expression*>* is_spec_object_args =
           new (zone()) ZoneList<Expression*>(1, zone());
@@ -2755,7 +2764,7 @@
-      // %_IsJSReceiver(temp) ? temp : throw_expression
+      // %_IsJSReceiver(temp) ? temp : 1
       Expression* is_object_conditional = factory()->NewConditional(
           is_spec_object_call, factory()->NewVariableProxy(temp),
-          throw_expression, pos);
+          factory()->NewSmiLiteral(1, pos), pos);
 
       // temp === undefined
       Expression* is_undefined = factory()->NewCompareOperation(
@@ -2768,7 +2777,10 @@
           is_object_conditional, pos);
     }
 
-    return_value->MarkTail();
+    // ES6 14.6.1 Static Semantics: IsInTailPosition
+    if (FLAG_harmony_tailcalls && !is_sloppy(language_mode())) {
+      function_state_->AddExpressionInTailPosition(return_value);
+    }
   }
   ExpectSemicolon(CHECK_OK);
 
@@ -2974,6 +2986,40 @@
       factory()->NewThrow(exception, pos), pos);
 }
 
+class Parser::DontCollectExpressionsInTailPositionScope {
+ public:
+  DontCollectExpressionsInTailPositionScope(
+      Parser::FunctionState* function_state)
+      : function_state_(function_state),
+        old_value_(function_state->collect_expressions_in_tail_position()) {
+    function_state->set_collect_expressions_in_tail_position(false);
+  }
+  ~DontCollectExpressionsInTailPositionScope() {
+    function_state_->set_collect_expressions_in_tail_position(old_value_);
+  }
+
+ private:
+  Parser::FunctionState* function_state_;
+  bool old_value_;
+};
+
+// Collects all return expressions at tail call position in this scope
+// to a separate list.
+class Parser::CollectExpressionsInTailPositionToListScope {
+ public:
+  CollectExpressionsInTailPositionToListScope(
+      Parser::FunctionState* function_state, List<Expression*>* list)
+      : function_state_(function_state), list_(list) {
+    function_state->expressions_in_tail_position().Swap(list_);
+  }
+  ~CollectExpressionsInTailPositionToListScope() {
+    function_state_->expressions_in_tail_position().Swap(list_);
+  }
+
+ private:
+  Parser::FunctionState* function_state_;
+  List<Expression*>* list_;
+};
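
[Editor's note] Both helper classes above are RAII guards: one saves a boolean flag and restores it in the destructor, the other swaps a list in and back out, so the setting cannot leak past the block even on early returns. A generic standalone sketch of the flag-saving variant (ScopedOverride is a hypothetical name):

    #include <iostream>

    // Saves the current value of a variable and restores it on scope exit,
    // like DontCollectExpressionsInTailPositionScope above.
    template <typename T>
    class ScopedOverride {
     public:
      ScopedOverride(T* slot, T new_value) : slot_(slot), old_value_(*slot) {
        *slot_ = new_value;
      }
      ~ScopedOverride() { *slot_ = old_value_; }

      // Non-copyable: exactly one restore per override.
      ScopedOverride(const ScopedOverride&) = delete;
      ScopedOverride& operator=(const ScopedOverride&) = delete;

     private:
      T* slot_;
      T old_value_;
    };

    int main() {
      bool collect_tail_exprs = true;
      {
        ScopedOverride<bool> no_collect(&collect_tail_exprs, false);
        std::cout << collect_tail_exprs << "\n";  // 0 inside the scope
      }
      std::cout << collect_tail_exprs << "\n";    // 1 restored afterwards
    }
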
 
 TryStatement* Parser::ParseTryStatement(bool* ok) {
   // TryStatement ::
@@ -2990,7 +3036,11 @@
   Expect(Token::TRY, CHECK_OK);
   int pos = position();
 
-  Block* try_block = ParseBlock(NULL, CHECK_OK);
+  Block* try_block;
+  {
+    DontCollectExpressionsInTailPositionScope no_tail_calls(function_state_);
+    try_block = ParseBlock(NULL, CHECK_OK);
+  }
 
   Token::Value tok = peek();
   if (tok != Token::CATCH && tok != Token::FINALLY) {
@@ -3002,6 +3052,7 @@
   Scope* catch_scope = NULL;
   Variable* catch_variable = NULL;
   Block* catch_block = NULL;
+  List<Expression*> expressions_in_tail_position_in_catch_block;
   if (tok == Token::CATCH) {
     Consume(Token::CATCH);
 
@@ -3009,7 +3060,7 @@
     catch_scope = NewScope(scope_, CATCH_SCOPE);
     catch_scope->set_start_position(scanner()->location().beg_pos);
 
-    ExpressionClassifier pattern_classifier;
+    ExpressionClassifier pattern_classifier(this);
     Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
     ValidateBindingPattern(&pattern_classifier, CHECK_OK);
 
@@ -3027,6 +3078,9 @@
     Expect(Token::RPAREN, CHECK_OK);
 
     {
+      CollectExpressionsInTailPositionToListScope
+          collect_expressions_in_tail_position_scope(
+              function_state_, &expressions_in_tail_position_in_catch_block);
       BlockState block_state(&scope_, catch_scope);
 
       // TODO(adamk): Make a version of ParseBlock that takes a scope and
@@ -3047,7 +3101,6 @@
           descriptor.scope = scope_;
           descriptor.hoist_scope = nullptr;
           descriptor.mode = LET;
-          descriptor.needs_init = true;
           descriptor.declaration_pos = pattern->position();
           descriptor.initialization_pos = pattern->position();
 
@@ -3102,6 +3155,11 @@
 
   TryStatement* result = NULL;
   if (catch_block != NULL) {
+    // For a try-catch construct, append return expressions from the catch
+    // block to the list of return expressions.
+    function_state_->expressions_in_tail_position().AddAll(
+        expressions_in_tail_position_in_catch_block);
+
     DCHECK(finally_block == NULL);
     DCHECK(catch_scope != NULL && catch_variable != NULL);
     result = factory()->NewTryCatchStatement(try_block, catch_scope,
@@ -3262,6 +3320,7 @@
     }
 
     for_of->Initialize(each, subject, body,
+                       iterator,
                        assign_iterator,
                        next_result,
                        result_done,
@@ -3288,9 +3347,8 @@
   }
 }
 
-
 Statement* Parser::DesugarLexicalBindingsInForStatement(
-    Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
+    Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
     ForStatement* loop, Statement* init, Expression* cond, Statement* next,
     Statement* body, bool* ok) {
   // ES6 13.7.4.8 specifies that on each loop iteration the let variables are
@@ -3331,7 +3389,6 @@
   //  }
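+  //
+  // For example (informative):
+  //
+  //   let fns = [];
+  //   for (let i = 0; i < 3; i++) fns.push(() => i);
+  //   fns.map(f => f())  // [0, 1, 2]: each iteration captures a fresh `i`.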
 
   DCHECK(names->length() > 0);
-  Scope* for_scope = scope_;
   ZoneList<Variable*> temps(names->length(), zone());
 
   Block* outer_block = factory()->NewBlock(NULL, names->length() + 4, false,
@@ -3384,150 +3441,155 @@
   ForStatement* outer_loop =
       factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
   outer_block->statements()->Add(outer_loop, zone());
-
-  outer_block->set_scope(for_scope);
-  scope_ = inner_scope;
+  outer_block->set_scope(scope_);
 
   Block* inner_block =
       factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
-  Block* ignore_completion_block = factory()->NewBlock(
-      NULL, names->length() + 3, true, RelocInfo::kNoPosition);
-  ZoneList<Variable*> inner_vars(names->length(), zone());
-  // For each let variable x:
-  //    make statement: let/const x = temp_x.
-  VariableMode mode = is_const ? CONST : LET;
-  for (int i = 0; i < names->length(); i++) {
-    VariableProxy* proxy = NewUnresolved(names->at(i), mode);
-    Declaration* declaration = factory()->NewVariableDeclaration(
-        proxy, mode, scope_, RelocInfo::kNoPosition);
-    Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
-    inner_vars.Add(declaration->proxy()->var(), zone());
-    VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
-    Assignment* assignment = factory()->NewAssignment(
-        Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
-    Statement* assignment_statement =
-        factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
-    DCHECK(init->position() != RelocInfo::kNoPosition);
-    proxy->var()->set_initializer_position(init->position());
-    ignore_completion_block->statements()->Add(assignment_statement, zone());
-  }
-
-  // Make statement: if (first == 1) { first = 0; } else { next; }
-  if (next) {
-    DCHECK(first);
-    Expression* compare = NULL;
-    // Make compare expression: first == 1.
-    {
-      Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
-      VariableProxy* first_proxy = factory()->NewVariableProxy(first);
-      compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
-                                               RelocInfo::kNoPosition);
-    }
-    Statement* clear_first = NULL;
-    // Make statement: first = 0.
-    {
-      VariableProxy* first_proxy = factory()->NewVariableProxy(first);
-      Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
-      Assignment* assignment = factory()->NewAssignment(
-          Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
-      clear_first =
-          factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
-    }
-    Statement* clear_first_or_next = factory()->NewIfStatement(
-        compare, clear_first, next, RelocInfo::kNoPosition);
-    ignore_completion_block->statements()->Add(clear_first_or_next, zone());
-  }
-
-  Variable* flag = scope_->NewTemporary(temp_name);
-  // Make statement: flag = 1.
   {
-    VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-    Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
-    Assignment* assignment = factory()->NewAssignment(
-        Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
-    Statement* assignment_statement =
-        factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
-    ignore_completion_block->statements()->Add(assignment_statement, zone());
-  }
+    BlockState block_state(&scope_, inner_scope);
 
-  // Make statement: if (!cond) break.
-  if (cond) {
-    Statement* stop =
-        factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
-    Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-    ignore_completion_block->statements()->Add(
-        factory()->NewIfStatement(cond, noop, stop, cond->position()), zone());
-  }
-
-  inner_block->statements()->Add(ignore_completion_block, zone());
-  // Make cond expression for main loop: flag == 1.
-  Expression* flag_cond = NULL;
-  {
-    Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
-    VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-    flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
-                                               RelocInfo::kNoPosition);
-  }
-
-  // Create chain of expressions "flag = 0, temp_x = x, ..."
-  Statement* compound_next_statement = NULL;
-  {
-    Expression* compound_next = NULL;
-    // Make expression: flag = 0.
-    {
-      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-      Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
-      compound_next = factory()->NewAssignment(Token::ASSIGN, flag_proxy,
-                                               const0, RelocInfo::kNoPosition);
-    }
-
-    // Make the comma-separated list of temp_x = x assignments.
-    int inner_var_proxy_pos = scanner()->location().beg_pos;
+    Block* ignore_completion_block = factory()->NewBlock(
+        NULL, names->length() + 3, true, RelocInfo::kNoPosition);
+    ZoneList<Variable*> inner_vars(names->length(), zone());
+    // For each let variable x:
+    //    make statement: let/const x = temp_x.
     for (int i = 0; i < names->length(); i++) {
+      VariableProxy* proxy = NewUnresolved(names->at(i), mode);
+      Declaration* declaration = factory()->NewVariableDeclaration(
+          proxy, mode, scope_, RelocInfo::kNoPosition);
+      Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
+      inner_vars.Add(declaration->proxy()->var(), zone());
       VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
-      VariableProxy* proxy =
-          factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
       Assignment* assignment = factory()->NewAssignment(
-          Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
-      compound_next = factory()->NewBinaryOperation(
-          Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
+          Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
+      Statement* assignment_statement =
+          factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+      DCHECK(init->position() != RelocInfo::kNoPosition);
+      proxy->var()->set_initializer_position(init->position());
+      ignore_completion_block->statements()->Add(assignment_statement, zone());
     }
 
-    compound_next_statement = factory()->NewExpressionStatement(
-        compound_next, RelocInfo::kNoPosition);
-  }
+    // Make statement: if (first == 1) { first = 0; } else { next; }
+    if (next) {
+      DCHECK(first);
+      Expression* compare = NULL;
+      // Make compare expression: first == 1.
+      {
+        Expression* const1 =
+            factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+        VariableProxy* first_proxy = factory()->NewVariableProxy(first);
+        compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
+                                                 RelocInfo::kNoPosition);
+      }
+      Statement* clear_first = NULL;
+      // Make statement: first = 0.
+      {
+        VariableProxy* first_proxy = factory()->NewVariableProxy(first);
+        Expression* const0 =
+            factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+        Assignment* assignment = factory()->NewAssignment(
+            Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
+        clear_first = factory()->NewExpressionStatement(assignment,
+                                                        RelocInfo::kNoPosition);
+      }
+      Statement* clear_first_or_next = factory()->NewIfStatement(
+          compare, clear_first, next, RelocInfo::kNoPosition);
+      ignore_completion_block->statements()->Add(clear_first_or_next, zone());
+    }
 
-  // Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
-  // Note that we re-use the original loop node, which retains its labels
-  // and ensures that any break or continue statements in body point to
-  // the right place.
-  loop->Initialize(NULL, flag_cond, compound_next_statement, body);
-  inner_block->statements()->Add(loop, zone());
+    Variable* flag = scope_->NewTemporary(temp_name);
+    // Make statement: flag = 1.
+    {
+      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+      Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+      Assignment* assignment = factory()->NewAssignment(
+          Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
+      Statement* assignment_statement =
+          factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+      ignore_completion_block->statements()->Add(assignment_statement, zone());
+    }
 
-  // Make statement: {{if (flag == 1) break;}}
-  {
-    Expression* compare = NULL;
-    // Make compare expresion: flag == 1.
+    // Make statement: if (!cond) break.
+    if (cond) {
+      Statement* stop =
+          factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+      Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+      ignore_completion_block->statements()->Add(
+          factory()->NewIfStatement(cond, noop, stop, cond->position()),
+          zone());
+    }
+
+    inner_block->statements()->Add(ignore_completion_block, zone());
+    // Make cond expression for main loop: flag == 1.
+    Expression* flag_cond = NULL;
     {
       Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
       VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-      compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
-                                               RelocInfo::kNoPosition);
+      flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+                                                 RelocInfo::kNoPosition);
     }
-    Statement* stop =
-        factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
-    Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-    Statement* if_flag_break =
-        factory()->NewIfStatement(compare, stop, empty, RelocInfo::kNoPosition);
-    Block* ignore_completion_block =
-        factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
-    ignore_completion_block->statements()->Add(if_flag_break, zone());
-    inner_block->statements()->Add(ignore_completion_block, zone());
-  }
 
-  inner_scope->set_end_position(scanner()->location().end_pos);
-  inner_block->set_scope(inner_scope);
-  scope_ = for_scope;
+    // Create chain of expressions "flag = 0, temp_x = x, ..."
+    Statement* compound_next_statement = NULL;
+    {
+      Expression* compound_next = NULL;
+      // Make expression: flag = 0.
+      {
+        VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+        Expression* const0 =
+            factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+        compound_next = factory()->NewAssignment(
+            Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
+      }
+
+      // Make the comma-separated list of temp_x = x assignments.
+      int inner_var_proxy_pos = scanner()->location().beg_pos;
+      for (int i = 0; i < names->length(); i++) {
+        VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
+        VariableProxy* proxy =
+            factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
+        Assignment* assignment = factory()->NewAssignment(
+            Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+        compound_next = factory()->NewBinaryOperation(
+            Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
+      }
+
+      compound_next_statement = factory()->NewExpressionStatement(
+          compound_next, RelocInfo::kNoPosition);
+    }
+
+    // Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
+    // Note that we re-use the original loop node, which retains its labels
+    // and ensures that any break or continue statements in body point to
+    // the right place.
+    loop->Initialize(NULL, flag_cond, compound_next_statement, body);
+    inner_block->statements()->Add(loop, zone());
+
+    // Make statement: {{if (flag == 1) break;}}
+    {
+      Expression* compare = NULL;
+      // Make compare expression: flag == 1.
+      {
+        Expression* const1 =
+            factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+        VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+        compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+                                                 RelocInfo::kNoPosition);
+      }
+      Statement* stop =
+          factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+      Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+      Statement* if_flag_break = factory()->NewIfStatement(
+          compare, stop, empty, RelocInfo::kNoPosition);
+      Block* ignore_completion_block =
+          factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+      ignore_completion_block->statements()->Add(if_flag_break, zone());
+      inner_block->statements()->Add(ignore_completion_block, zone());
+    }
+
+    inner_scope->set_end_position(scanner()->location().end_pos);
+    inner_block->set_scope(inner_scope);
+  }
 
   outer_loop->Initialize(NULL, NULL, NULL, inner_block);
   return outer_block;
@@ -3536,18 +3598,14 @@
 
 Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
                                      bool* ok) {
-  // ForStatement ::
-  //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
   int stmt_pos = peek_position();
-  bool is_const = false;
   Statement* init = NULL;
   ZoneList<const AstRawString*> lexical_bindings(1, zone());
 
   // Create an in-between scope for let-bound iteration variables.
-  Scope* saved_scope = scope_;
   Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
-  scope_ = for_scope;
+
+  BlockState block_state(&scope_, for_scope);
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   for_scope->set_start_position(scanner()->location().beg_pos);
@@ -3556,23 +3614,20 @@
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
         (peek() == Token::LET && IsNextLetKeyword())) {
-      ParseVariableDeclarations(kForStatement, &parsing_result, CHECK_OK);
-      is_const = parsing_result.descriptor.mode == CONST;
+      ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
+                                CHECK_OK);
 
-      int num_decl = parsing_result.declarations.length();
-      bool accept_IN = num_decl >= 1;
       ForEachStatement::VisitMode mode;
       int each_beg_pos = scanner()->location().beg_pos;
       int each_end_pos = scanner()->location().end_pos;
 
-      if (accept_IN && CheckInOrOf(&mode, ok)) {
+      if (CheckInOrOf(&mode, ok)) {
         if (!*ok) return nullptr;
-        if (num_decl != 1) {
-          const char* loop_type =
-              mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
+        if (parsing_result.declarations.length() != 1) {
           ParserTraits::ReportMessageAt(
               parsing_result.bindings_loc,
-              MessageTemplate::kForInOfLoopMultiBindings, loop_type);
+              MessageTemplate::kForInOfLoopMultiBindings,
+              ForEachStatement::VisitModeString(mode));
           *ok = false;
           return nullptr;
         }
@@ -3582,14 +3637,10 @@
             (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
              IsLexicalVariableMode(parsing_result.descriptor.mode) ||
              !decl.pattern->IsVariableProxy())) {
-          if (mode == ForEachStatement::ITERATE) {
-            ReportMessageAt(parsing_result.first_initializer_loc,
-                            MessageTemplate::kForOfLoopInitializer);
-          } else {
-            // TODO(caitp): This should be an error in sloppy mode too.
-            ReportMessageAt(parsing_result.first_initializer_loc,
-                            MessageTemplate::kForInLoopInitializer);
-          }
+          ParserTraits::ReportMessageAt(
+              parsing_result.first_initializer_loc,
+              MessageTemplate::kForInOfLoopInitializer,
+              ForEachStatement::VisitModeString(mode));
           *ok = false;
           return nullptr;
         }
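+        // For example,  for (let x = 0 of xs) {}  and, in strict mode,
+        //  for (var x = 0 in o) {}  are both rejected here; only a sloppy
+        //  for (var x = 0 in o) {}  with a simple binding falls through to
+        // the legacy handling (and use counter) below.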
@@ -3599,6 +3650,7 @@
         // special case for legacy for (var/const x =.... in)
         if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
             decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+          ++use_counts_[v8::Isolate::kForInInitializer];
           const AstRawString* name =
               decl.pattern->AsVariableProxy()->raw_name();
           VariableProxy* single_var = scope_->NewUnresolved(
@@ -3630,52 +3682,59 @@
         //     let x;  // for TDZ
         //   }
 
-        Variable* temp = scope_->NewTemporary(
-            ast_value_factory()->dot_for_string());
+        Variable* temp =
+            scope_->NewTemporary(ast_value_factory()->dot_for_string());
         ForEachStatement* loop =
             factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
-        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expression* enumerable;
+        if (mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
+          RewriteNonPattern(&classifier, CHECK_OK);
+        } else {
+          enumerable = ParseExpression(true, CHECK_OK);
+        }
 
         Expect(Token::RPAREN, CHECK_OK);
 
         Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
         body_scope->set_start_position(scanner()->location().beg_pos);
-        scope_ = body_scope;
-
-        Statement* body = ParseSubStatement(NULL, CHECK_OK);
 
         Block* body_block =
             factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
 
-        auto each_initialization_block =
-            factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
         {
-          auto descriptor = parsing_result.descriptor;
-          descriptor.declaration_pos = RelocInfo::kNoPosition;
-          descriptor.initialization_pos = RelocInfo::kNoPosition;
-          decl.initializer = factory()->NewVariableProxy(temp);
+          BlockState block_state(&scope_, body_scope);
 
-          PatternRewriter::DeclareAndInitializeVariables(
-              each_initialization_block, &descriptor, &decl,
-              IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
-                                                     : nullptr,
-              CHECK_OK);
+          Statement* body = ParseSubStatement(NULL, CHECK_OK);
+
+          auto each_initialization_block =
+              factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+          {
+            auto descriptor = parsing_result.descriptor;
+            descriptor.declaration_pos = RelocInfo::kNoPosition;
+            descriptor.initialization_pos = RelocInfo::kNoPosition;
+            decl.initializer = factory()->NewVariableProxy(temp);
+
+            PatternRewriter::DeclareAndInitializeVariables(
+                each_initialization_block, &descriptor, &decl,
+                IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
+                                                       : nullptr,
+                CHECK_OK);
+          }
+
+          body_block->statements()->Add(each_initialization_block, zone());
+          body_block->statements()->Add(body, zone());
+          VariableProxy* temp_proxy =
+              factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
+          InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
+                                     false);
         }
-
-        body_block->statements()->Add(each_initialization_block, zone());
-        body_block->statements()->Add(body, zone());
-        VariableProxy* temp_proxy =
-            factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
-        InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
-                                   false);
-        scope_ = for_scope;
         body_scope->set_end_position(scanner()->location().end_pos);
         body_scope = body_scope->FinalizeBlockScope();
-        if (body_scope != nullptr) {
-          body_block->set_scope(body_scope);
-        }
+        body_block->set_scope(body_scope);
 
         // Create a TDZ for any lexically-bound names.
         if (IsLexicalVariableMode(parsing_result.descriptor.mode)) {
@@ -3688,28 +3747,31 @@
             // TODO(adamk): This needs to be some sort of special
             // INTERNAL variable that's invisible to the debugger
             // but visible to everything else.
-            VariableProxy* tdz_proxy = NewUnresolved(lexical_bindings[i], LET);
+            VariableProxy* tdz_proxy =
+                NewUnresolved(lexical_bindings[i], LET);
             Declaration* tdz_decl = factory()->NewVariableDeclaration(
                 tdz_proxy, LET, scope_, RelocInfo::kNoPosition);
-            Variable* tdz_var = Declare(tdz_decl, DeclarationDescriptor::NORMAL,
-                                        true, CHECK_OK);
+            Variable* tdz_var = Declare(
+                tdz_decl, DeclarationDescriptor::NORMAL, true, CHECK_OK);
             tdz_var->set_initializer_position(position());
           }
         }
 
-        scope_ = saved_scope;
+        Statement* final_loop = loop->IsForOfStatement()
+            ? FinalizeForOfStatement(
+                loop->AsForOfStatement(), RelocInfo::kNoPosition)
+            : loop;
+
         for_scope->set_end_position(scanner()->location().end_pos);
         for_scope = for_scope->FinalizeBlockScope();
         // Parsed for-in loop w/ variable declarations.
         if (init_block != nullptr) {
-          init_block->statements()->Add(loop, zone());
-          if (for_scope != nullptr) {
-            init_block->set_scope(for_scope);
-          }
+          init_block->statements()->Add(final_loop, zone());
+          init_block->set_scope(for_scope);
           return init_block;
         } else {
           DCHECK_NULL(for_scope);
-          return loop;
+          return final_loop;
         }
       } else {
         init = parsing_result.BuildInitializationBlock(
@@ -3720,7 +3782,7 @@
       }
     } else {
       int lhs_beg_pos = peek_position();
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
       int lhs_end_pos = scanner()->location().end_pos;
       ForEachStatement::VisitMode mode;
@@ -3738,8 +3800,7 @@
       if (is_destructuring) {
         ValidateAssignmentPattern(&classifier, CHECK_OK);
       } else {
-        expression =
-            ParserTraits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+        RewriteNonPattern(&classifier, CHECK_OK);
       }
 
       if (is_for_each) {
@@ -3753,7 +3814,15 @@
             factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
-        Expression* enumerable = ParseExpression(true, CHECK_OK);
+        Expression* enumerable;
+        if (mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
+          RewriteNonPattern(&classifier, CHECK_OK);
+        } else {
+          enumerable = ParseExpression(true, CHECK_OK);
+        }
+
         Expect(Token::RPAREN, CHECK_OK);
 
         // Make a block around the statement in case a lexical binding
@@ -3763,24 +3832,28 @@
         // expressions in head of the loop should actually have variables
         // resolved in the outer scope.
         Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
-        scope_ = body_scope;
-        Block* block =
-            factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
-        Statement* body = ParseSubStatement(NULL, CHECK_OK);
-        block->statements()->Add(body, zone());
-        InitializeForEachStatement(loop, expression, enumerable, block,
-                                   is_destructuring);
-        scope_ = saved_scope;
-        body_scope->set_end_position(scanner()->location().end_pos);
-        body_scope = body_scope->FinalizeBlockScope();
-        if (body_scope != nullptr) {
+        {
+          BlockState block_state(&scope_, body_scope);
+          Block* block =
+              factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+          Statement* body = ParseSubStatement(NULL, CHECK_OK);
+          block->statements()->Add(body, zone());
+          InitializeForEachStatement(loop, expression, enumerable, block,
+                                     is_destructuring);
+          body_scope->set_end_position(scanner()->location().end_pos);
+          body_scope = body_scope->FinalizeBlockScope();
           block->set_scope(body_scope);
         }
+
+        Statement* final_loop = loop->IsForOfStatement()
+            ? FinalizeForOfStatement(
+                loop->AsForOfStatement(), RelocInfo::kNoPosition)
+            : loop;
+
         for_scope->set_end_position(scanner()->location().end_pos);
         for_scope = for_scope->FinalizeBlockScope();
         DCHECK(for_scope == nullptr);
-        // Parsed for-in loop.
-        return loop;
+        return final_loop;
 
       } else {
         init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
@@ -3802,40 +3875,42 @@
   }
   Expect(Token::SEMICOLON, CHECK_OK);
 
+  Expression* cond = NULL;
+  Statement* next = NULL;
+  Statement* body = NULL;
+
   // If there are let bindings, then the condition and the next statement of
   // the for loop must be parsed in a new scope.
-  Scope* inner_scope = NULL;
+  Scope* inner_scope = scope_;
   if (lexical_bindings.length() > 0) {
     inner_scope = NewScope(for_scope, BLOCK_SCOPE);
     inner_scope->set_start_position(scanner()->location().beg_pos);
-    scope_ = inner_scope;
   }
+  {
+    BlockState block_state(&scope_, inner_scope);
 
-  Expression* cond = NULL;
-  if (peek() != Token::SEMICOLON) {
-    cond = ParseExpression(true, CHECK_OK);
+    if (peek() != Token::SEMICOLON) {
+      cond = ParseExpression(true, CHECK_OK);
+    }
+    Expect(Token::SEMICOLON, CHECK_OK);
+
+    if (peek() != Token::RPAREN) {
+      Expression* exp = ParseExpression(true, CHECK_OK);
+      next = factory()->NewExpressionStatement(exp, exp->position());
+    }
+    Expect(Token::RPAREN, CHECK_OK);
+
+    body = ParseSubStatement(NULL, CHECK_OK);
   }
-  Expect(Token::SEMICOLON, CHECK_OK);
-
-  Statement* next = NULL;
-  if (peek() != Token::RPAREN) {
-    Expression* exp = ParseExpression(true, CHECK_OK);
-    next = factory()->NewExpressionStatement(exp, exp->position());
-  }
-  Expect(Token::RPAREN, CHECK_OK);
-
-  Statement* body = ParseSubStatement(NULL, CHECK_OK);
 
   Statement* result = NULL;
   if (lexical_bindings.length() > 0) {
-    scope_ = for_scope;
+    BlockState block_state(&scope_, for_scope);
     result = DesugarLexicalBindingsInForStatement(
-                 inner_scope, is_const, &lexical_bindings, loop, init, cond,
-                 next, body, CHECK_OK);
-    scope_ = saved_scope;
+        inner_scope, parsing_result.descriptor.mode, &lexical_bindings, loop,
+        init, cond, next, body, CHECK_OK);
     for_scope->set_end_position(scanner()->location().end_pos);
   } else {
-    scope_ = saved_scope;
     for_scope->set_end_position(scanner()->location().end_pos);
     for_scope = for_scope->FinalizeBlockScope();
     if (for_scope) {
@@ -4035,7 +4110,7 @@
   ParseArrowFunctionFormalParameters(parameters, expr, params_loc, ok);
   if (!*ok) return;
 
-  ExpressionClassifier classifier;
+  Type::ExpressionClassifier classifier(parser_);
   if (!parameters->is_simple) {
     classifier.RecordNonSimpleParameter();
   }
@@ -4069,7 +4144,6 @@
     const AstRawString* function_name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
     int function_token_pos, FunctionLiteral::FunctionType function_type,
-    FunctionLiteral::ArityRestriction arity_restriction,
     LanguageMode language_mode, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -4137,17 +4211,18 @@
   int materialized_literal_count = -1;
   int expected_property_count = -1;
   DuplicateFinder duplicate_finder(scanner()->unicode_cache());
-  ExpressionClassifier formals_classifier(&duplicate_finder);
   FunctionLiteral::EagerCompileHint eager_compile_hint =
       parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
                               : FunctionLiteral::kShouldLazyCompile;
   bool should_be_used_once_hint = false;
+  bool has_duplicate_parameters;
   // Parse function.
   {
     AstNodeFactory function_factory(ast_value_factory());
     FunctionState function_state(&function_state_, &scope_, scope, kind,
                                  &function_factory);
     scope_->SetScopeName(function_name);
+    ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
     if (is_generator) {
       // For generators, allocating variables in contexts is currently a win
@@ -4172,11 +4247,15 @@
     Expect(Token::RPAREN, CHECK_OK);
     int formals_end_position = scanner()->location().end_pos;
 
-    CheckArityRestrictions(arity, arity_restriction,
-                           formals.has_rest, start_position,
+    CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
                            formals_end_position, CHECK_OK);
     Expect(Token::LBRACE, CHECK_OK);
 
+    // Don't include the rest parameter in the function's formal parameter
+    // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
+    // which says whether we need to create an arguments adaptor frame).
+    if (formals.has_rest) arity--;
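+    // For example,  function f(a, b, ...rest) {}  gets an internal formal
+    // parameter count of 2, matching f.length.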
+
     // Determine if the function can be parsed lazily. Lazy parsing is different
     // from lazy compilation; we need to parse more eagerly than we compile.
 
@@ -4321,10 +4400,10 @@
       // If body can be inspected, rewrite queued destructuring assignments
       ParserTraits::RewriteDestructuringAssignments();
     }
+    has_duplicate_parameters =
+      !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
   }
 
-  bool has_duplicate_parameters =
-      !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
   FunctionLiteral::ParameterFlag duplicate_parameters =
       has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
                                : FunctionLiteral::kNoDuplicateParameters;
@@ -4337,10 +4416,6 @@
   if (should_be_used_once_hint)
     function_literal->set_should_be_used_once_hint();
 
-  if (scope->has_rest_parameter()) {
-    function_literal->set_dont_optimize_reason(kRestParameter);
-  }
-
   if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
 }
@@ -4461,15 +4536,18 @@
         scope_(scope) {}
 
  private:
-  void VisitExpression(Expression* expr) {
-    RewritableAssignmentExpression* to_rewrite =
-        expr->AsRewritableAssignmentExpression();
+  void VisitExpression(Expression* expr) override {
+    RewritableExpression* to_rewrite = expr->AsRewritableExpression();
     if (to_rewrite == nullptr || to_rewrite->is_rewritten()) return;
 
     Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
                                                             scope_);
   }
 
+  // Code in function literals does not need to be eagerly rewritten; it will
+  // be rewritten when scheduled.
+  void VisitFunctionLiteral(FunctionLiteral* expr) override {}
+
  private:
   Parser* parser_;
   Scope* scope_;
@@ -4497,7 +4575,6 @@
     descriptor.scope = scope_;
     descriptor.hoist_scope = nullptr;
     descriptor.mode = LET;
-    descriptor.needs_init = true;
     descriptor.declaration_pos = parameter.pattern->position();
     // The position that will be used by the AssignmentExpression
     // which copies from the temp parameter to the pattern.
@@ -4597,35 +4674,72 @@
   {
     BlockState block_state(&scope_, inner_scope);
 
-    // For generators, allocate and yield an iterator on function entry.
     if (IsGeneratorFunction(kind)) {
-      ZoneList<Expression*>* arguments =
-          new(zone()) ZoneList<Expression*>(0, zone());
-      CallRuntime* allocation = factory()->NewCallRuntime(
-          Runtime::kCreateJSGeneratorObject, arguments, pos);
-      VariableProxy* init_proxy = factory()->NewVariableProxy(
-          function_state_->generator_object_variable());
-      Assignment* assignment = factory()->NewAssignment(
-          Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
-      VariableProxy* get_proxy = factory()->NewVariableProxy(
-          function_state_->generator_object_variable());
-      Yield* yield = factory()->NewYield(
-          get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
-      body->Add(factory()->NewExpressionStatement(
-          yield, RelocInfo::kNoPosition), zone());
-    }
+      // We produce:
+      //
+      // try { InitialYield; ...body...; FinalYield }
+      // finally { %GeneratorClose(generator) }
+      //
+      // - InitialYield yields the actual generator object.
+      // - FinalYield yields {value: foo, done: true} where foo is the
+      //   completion value of body.  (This is needed here in case the body
+      //   falls through without an explicit return.)
+      // - Any return statement inside the body will be converted into a similar
+      //   FinalYield.
+      // - If the generator terminates for whatever reason, we must close it.
+      //   Hence the finally clause.
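+      //
+      // For example (informative):
+      //
+      //   function* g() { yield 1; }
+      //   let it = g();
+      //   it.next();  // {value: 1, done: false}
+      //   it.next();  // {value: undefined, done: true}  (the FinalYield)
+      //   it.next();  // {value: undefined, done: true}  (generator closed)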
 
-    ParseStatementList(body, Token::RBRACE, CHECK_OK);
+      Block* try_block =
+          factory()->NewBlock(nullptr, 3, false, RelocInfo::kNoPosition);
 
-    if (IsGeneratorFunction(kind)) {
+      {
+        ZoneList<Expression*>* arguments =
+            new (zone()) ZoneList<Expression*>(0, zone());
+        CallRuntime* allocation = factory()->NewCallRuntime(
+            Runtime::kCreateJSGeneratorObject, arguments, pos);
+        VariableProxy* init_proxy = factory()->NewVariableProxy(
+            function_state_->generator_object_variable());
+        Assignment* assignment = factory()->NewAssignment(
+            Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
+        VariableProxy* get_proxy = factory()->NewVariableProxy(
+            function_state_->generator_object_variable());
+        Yield* yield = factory()->NewYield(
+            get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
+        try_block->statements()->Add(
+            factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
+            zone());
+      }
+
+      ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
+
       VariableProxy* get_proxy = factory()->NewVariableProxy(
           function_state_->generator_object_variable());
       Expression* undefined =
           factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
       Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
                                          RelocInfo::kNoPosition);
-      body->Add(factory()->NewExpressionStatement(
-          yield, RelocInfo::kNoPosition), zone());
+      try_block->statements()->Add(
+          factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
+          zone());
+
+      Block* finally_block =
+          factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
+      ZoneList<Expression*>* args =
+          new (zone()) ZoneList<Expression*>(1, zone());
+      VariableProxy* call_proxy = factory()->NewVariableProxy(
+          function_state_->generator_object_variable());
+      args->Add(call_proxy, zone());
+      Expression* call = factory()->NewCallRuntime(
+          Runtime::kGeneratorClose, args, RelocInfo::kNoPosition);
+      finally_block->statements()->Add(
+          factory()->NewExpressionStatement(call, RelocInfo::kNoPosition),
+          zone());
+
+      body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
+                                                  RelocInfo::kNoPosition),
+                zone());
+    } else {
+      ParseStatementList(body, Token::RBRACE, CHECK_OK);
     }
 
     if (IsSubclassConstructor(kind)) {
@@ -4682,6 +4796,13 @@
                     RelocInfo::kNoPosition));
   }
 
+  // ES6 14.6.1 Static Semantics: IsInTailPosition
+  // Mark collected return expressions that are in tail call position.
+  const List<Expression*>& expressions_in_tail_position =
+      function_state_->expressions_in_tail_position();
+  for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
+    expressions_in_tail_position[i]->MarkTail();
+  }
   return result;
 }
 
@@ -4693,6 +4814,8 @@
   if (pre_parse_timer_ != NULL) {
     pre_parse_timer_->Start();
   }
+  TRACE_EVENT0("v8", "V8.PreParse");
+
   DCHECK_EQ(Token::LBRACE, scanner()->current_token());
 
   if (reusable_preparser_ == NULL) {
@@ -4709,6 +4832,7 @@
     SET_ALLOW(strong_mode);
     SET_ALLOW(harmony_do_expressions);
     SET_ALLOW(harmony_function_name);
+    SET_ALLOW(harmony_function_sent);
 #undef SET_ALLOW
   }
   PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4751,19 +4875,17 @@
   VariableProxy* proxy = NULL;
   if (name != NULL) {
     proxy = NewUnresolved(name, CONST);
-    const bool is_class_declaration = true;
-    Declaration* declaration = factory()->NewVariableDeclaration(
-        proxy, CONST, block_scope, pos, is_class_declaration,
-        scope_->class_declaration_group_start());
+    Declaration* declaration =
+        factory()->NewVariableDeclaration(proxy, CONST, block_scope, pos);
     Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
   }
 
   Expression* extends = NULL;
   if (Check(Token::EXTENDS)) {
     block_scope->set_start_position(scanner()->location().end_pos);
-    ExpressionClassifier classifier;
+    ExpressionClassifier classifier(this);
     extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
-    extends = ParserTraits::RewriteNonPattern(extends, &classifier, CHECK_OK);
+    RewriteNonPattern(&classifier, CHECK_OK);
   } else {
     block_scope->set_start_position(scanner()->location().end_pos);
   }
@@ -4784,25 +4906,27 @@
     const bool is_static = false;
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
-    ExpressionClassifier classifier;
-    const AstRawString* name = nullptr;
+    ExpressionClassifier classifier(this);
+    const AstRawString* property_name = nullptr;
     ObjectLiteral::Property* property = ParsePropertyDefinition(
         &checker, in_class, has_extends, is_static, &is_computed_name,
-        &has_seen_constructor, &classifier, &name, CHECK_OK);
-    property = ParserTraits::RewriteNonPatternObjectLiteralProperty(
-        property, &classifier, CHECK_OK);
+        &has_seen_constructor, &classifier, &property_name, CHECK_OK);
+    RewriteNonPattern(&classifier, CHECK_OK);
 
     if (has_seen_constructor && constructor == NULL) {
       constructor = GetPropertyValue(property)->AsFunctionLiteral();
       DCHECK_NOT_NULL(constructor);
+      constructor->set_raw_name(
+          name != nullptr ? name : ast_value_factory()->empty_string());
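+      // For example,  class C { constructor() {} }  now yields
+      // C.name === "C"; an anonymous class expression gets "".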
     } else {
       properties->Add(property, zone());
     }
 
     if (fni_ != NULL) fni_->Infer();
 
-    if (allow_harmony_function_name()) {
-      SetFunctionNameFromPropertyName(property, name);
+    if (allow_harmony_function_name() &&
+        property_name != ast_value_factory()->constructor_string()) {
+      SetFunctionNameFromPropertyName(property, property_name);
     }
   }
 
@@ -4810,8 +4934,8 @@
   int end_pos = scanner()->location().end_pos;
 
   if (constructor == NULL) {
-    constructor = DefaultConstructor(extends != NULL, block_scope, pos, end_pos,
-                                     block_scope->language_mode());
+    constructor = DefaultConstructor(name, extends != NULL, block_scope, pos,
+                                     end_pos, block_scope->language_mode());
   }
 
   // Note that we do not finalize this block scope because strong
@@ -4823,8 +4947,8 @@
     proxy->var()->set_initializer_position(end_pos);
   }
 
-  return factory()->NewClassLiteral(name, block_scope, proxy, extends,
-                                    constructor, properties, pos, end_pos);
+  return factory()->NewClassLiteral(block_scope, proxy, extends, constructor,
+                                    properties, pos, end_pos);
 }
 
 
@@ -4838,10 +4962,9 @@
   const AstRawString* name = ParseIdentifier(kAllowRestrictedIdentifiers,
                                              CHECK_OK);
   Scanner::Location spread_pos;
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   ZoneList<Expression*>* args =
       ParseArguments(&spread_pos, &classifier, CHECK_OK);
-  args = RewriteNonPatternArguments(args, &classifier, CHECK_OK);
 
   DCHECK(!spread_pos.IsValid());
 
@@ -5051,6 +5174,12 @@
       isolate->CountUsage(v8::Isolate::UseCounterFeature(feature));
     }
   }
+  if (scanner_.FoundHtmlComment()) {
+    isolate->CountUsage(v8::Isolate::kHtmlComment);
+    if (script->line_offset() == 0 && script->column_offset() == 0) {
+      isolate->CountUsage(v8::Isolate::kHtmlCommentInExternalScript);
+    }
+  }
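+  // Informative: HTML-like comments (e.g. a script line beginning with
+  // <!-- ) are an Annex B web-compatibility feature; the counters above
+  // record how often scripts rely on them.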
   isolate->counters()->total_preparse_skipped()->Increment(
       total_preparse_skipped_);
 }
@@ -5404,145 +5533,1381 @@
 }
 
 
-Expression* ParserTraits::RewriteNonPattern(
-    Expression* expr, const ExpressionClassifier* classifier, bool* ok) {
-  return parser_->RewriteNonPattern(expr, classifier, ok);
+void ParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
+                                     bool* ok) {
+  parser_->RewriteNonPattern(classifier, ok);
 }
 
 
-ZoneList<Expression*>* ParserTraits::RewriteNonPatternArguments(
-    ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
-    bool* ok) {
-  return parser_->RewriteNonPatternArguments(args, classifier, ok);
+Zone* ParserTraits::zone() const {
+  return parser_->function_state_->scope()->zone();
 }
 
 
-ObjectLiteralProperty* ParserTraits::RewriteNonPatternObjectLiteralProperty(
-    ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
-    bool* ok) {
-  return parser_->RewriteNonPatternObjectLiteralProperty(property, classifier,
-                                                         ok);
+ZoneList<Expression*>* ParserTraits::GetNonPatternList() const {
+  return parser_->function_state_->non_patterns_to_rewrite();
 }
 
 
-Expression* Parser::RewriteNonPattern(Expression* expr,
-                                      const ExpressionClassifier* classifier,
-                                      bool* ok) {
-  // For the time being, this does no rewriting at all.
-  ValidateExpression(classifier, ok);
-  return expr;
-}
+class NonPatternRewriter : public AstExpressionRewriter {
+ public:
+  NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
+      : AstExpressionRewriter(stack_limit), parser_(parser) {}
+  ~NonPatternRewriter() override {}
 
-
-ZoneList<Expression*>* Parser::RewriteNonPatternArguments(
-    ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
-    bool* ok) {
-  // For the time being, this does no rewriting at all.
-  ValidateExpression(classifier, ok);
-  return args;
-}
-
-
-ObjectLiteralProperty* Parser::RewriteNonPatternObjectLiteralProperty(
-    ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
-    bool* ok) {
-  if (property != nullptr) {
-    Expression* key = RewriteNonPattern(property->key(), classifier, ok);
-    property->set_key(key);
-    Expression* value = RewriteNonPattern(property->value(), classifier, ok);
-    property->set_value(value);
+ private:
+  bool RewriteExpression(Expression* expr) override {
+    if (expr->IsRewritableExpression()) return true;
+    // Rewrite only what could have been a pattern but is not.
+    if (expr->IsArrayLiteral()) {
+      // Spread rewriting in array literals.
+      ArrayLiteral* lit = expr->AsArrayLiteral();
+      VisitExpressions(lit->values());
+      replacement_ = parser_->RewriteSpreads(lit);
+      return false;
+    }
+    if (expr->IsObjectLiteral()) {
+      return true;
+    }
+    if (expr->IsBinaryOperation() &&
+        expr->AsBinaryOperation()->op() == Token::COMMA) {
+      return true;
+    }
+    // Everything else does not need rewriting.
+    return false;
   }
-  return property;
+
+  void VisitObjectLiteralProperty(ObjectLiteralProperty* property) override {
+    if (property == nullptr) return;
+    // Do not rewrite (computed) key expressions.
+    AST_REWRITE_PROPERTY(Expression, property, value);
+  }
+
+  Parser* parser_;
+};
+
+
+void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
+  ValidateExpression(classifier, ok);
+  if (!*ok) return;
+  auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
+  int begin = classifier->GetNonPatternBegin();
+  int end = non_patterns_to_rewrite->length();
+  if (begin < end) {
+    NonPatternRewriter rewriter(stack_limit_, this);
+    for (int i = begin; i < end; i++) {
+      DCHECK(non_patterns_to_rewrite->at(i)->IsRewritableExpression());
+      rewriter.Rewrite(non_patterns_to_rewrite->at(i));
+    }
+    non_patterns_to_rewrite->Rewind(begin);
+  }
 }
 
 
 void Parser::RewriteDestructuringAssignments() {
-  FunctionState* func = function_state_;
   if (!allow_harmony_destructuring_assignment()) return;
-  const List<DestructuringAssignment>& assignments =
-      func->destructuring_assignments_to_rewrite();
+  const auto& assignments =
+      function_state_->destructuring_assignments_to_rewrite();
   for (int i = assignments.length() - 1; i >= 0; --i) {
     // Rewrite list in reverse, so that nested assignment patterns are rewritten
     // correctly.
-    DestructuringAssignment pair = assignments.at(i);
-    RewritableAssignmentExpression* to_rewrite =
-        pair.assignment->AsRewritableAssignmentExpression();
-    Scope* scope = pair.scope;
+    const DestructuringAssignment& pair = assignments.at(i);
+    RewritableExpression* to_rewrite =
+        pair.assignment->AsRewritableExpression();
     DCHECK_NOT_NULL(to_rewrite);
     if (!to_rewrite->is_rewritten()) {
-      PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
+      PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite,
+                                                      pair.scope);
     }
   }
 }
 
 
+Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
+  // Array literals containing spreads are rewritten using do expressions, e.g.
+  //    [1, 2, 3, ...x, 4, ...y, 5]
+  // is roughly rewritten as:
+  //    do {
+  //      $R = [1, 2, 3];
+  //      for ($i of x) %AppendElement($R, $i);
+  //      %AppendElement($R, 4);
+  //      for ($j of y) %AppendElement($R, $j);
+  //      %AppendElement($R, 5);
+  //      $R
+  //    }
+  // where $R, $i and $j are fresh temporary variables.
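+  // For instance (informative), with x = [10, 11] and y = [], the example
+  // above evaluates to [1, 2, 3, 10, 11, 4, 5].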
+  ZoneList<Expression*>::iterator s = lit->FirstSpread();
+  if (s == lit->EndValue()) return nullptr;  // no spread, no rewriting...
+  Variable* result =
+      scope_->NewTemporary(ast_value_factory()->dot_result_string());
+  // NOTE: The value assigned to R is the whole original array literal,
+  // spreads included. This will be fixed before the rewritten AST is returned.
+  // $R = lit
+  Expression* init_result =
+      factory()->NewAssignment(Token::INIT, factory()->NewVariableProxy(result),
+                               lit, RelocInfo::kNoPosition);
+  Block* do_block =
+      factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
+  do_block->statements()->Add(
+      factory()->NewExpressionStatement(init_result, RelocInfo::kNoPosition),
+      zone());
+  // Traverse the array literal starting from the first spread.
+  while (s != lit->EndValue()) {
+    Expression* value = *s++;
+    Spread* spread = value->AsSpread();
+    if (spread == nullptr) {
+      // If the element is not a spread, we're adding a single:
+      // %AppendElement($R, value)
+      ZoneList<Expression*>* append_element_args = NewExpressionList(2, zone());
+      append_element_args->Add(factory()->NewVariableProxy(result), zone());
+      append_element_args->Add(value, zone());
+      do_block->statements()->Add(
+          factory()->NewExpressionStatement(
+              factory()->NewCallRuntime(Runtime::kAppendElement,
+                                        append_element_args,
+                                        RelocInfo::kNoPosition),
+              RelocInfo::kNoPosition),
+          zone());
+    } else {
+      // If it's a spread, we're adding a for/of loop iterating through it.
+      Variable* each =
+          scope_->NewTemporary(ast_value_factory()->dot_for_string());
+      Expression* subject = spread->expression();
+      Variable* iterator =
+          scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
+      Variable* element =
+          scope_->NewTemporary(ast_value_factory()->dot_result_string());
+      // iterator = subject[Symbol.iterator]()
+      Expression* assign_iterator = factory()->NewAssignment(
+          Token::ASSIGN, factory()->NewVariableProxy(iterator),
+          GetIterator(subject, factory(), spread->expression_position()),
+          subject->position());
+      // !%_IsJSReceiver(element = iterator.next()) &&
+      //     %ThrowIteratorResultNotAnObject(element)
+      Expression* next_element;
+      {
+        // element = iterator.next()
+        Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+        next_element = BuildIteratorNextResult(iterator_proxy, element,
+                                               spread->expression_position());
+      }
+      // element.done
+      Expression* element_done;
+      {
+        Expression* done_literal = factory()->NewStringLiteral(
+            ast_value_factory()->done_string(), RelocInfo::kNoPosition);
+        Expression* element_proxy = factory()->NewVariableProxy(element);
+        element_done = factory()->NewProperty(element_proxy, done_literal,
+                                              RelocInfo::kNoPosition);
+      }
+      // each = element.value
+      Expression* assign_each;
+      {
+        Expression* value_literal = factory()->NewStringLiteral(
+            ast_value_factory()->value_string(), RelocInfo::kNoPosition);
+        Expression* element_proxy = factory()->NewVariableProxy(element);
+        Expression* element_value = factory()->NewProperty(
+            element_proxy, value_literal, RelocInfo::kNoPosition);
+        assign_each = factory()->NewAssignment(
+            Token::ASSIGN, factory()->NewVariableProxy(each), element_value,
+            RelocInfo::kNoPosition);
+      }
+      // %AppendElement($R, each)
+      Statement* append_body;
+      {
+        ZoneList<Expression*>* append_element_args =
+            NewExpressionList(2, zone());
+        append_element_args->Add(factory()->NewVariableProxy(result), zone());
+        append_element_args->Add(factory()->NewVariableProxy(each), zone());
+        append_body = factory()->NewExpressionStatement(
+            factory()->NewCallRuntime(Runtime::kAppendElement,
+                                      append_element_args,
+                                      RelocInfo::kNoPosition),
+            RelocInfo::kNoPosition);
+      }
+      // for (each of spread) %AppendElement($R, each)
+      ForEachStatement* loop = factory()->NewForEachStatement(
+          ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
+      ForOfStatement* for_of = loop->AsForOfStatement();
+      for_of->Initialize(factory()->NewVariableProxy(each), subject,
+                         append_body, iterator, assign_iterator, next_element,
+                         element_done, assign_each);
+      do_block->statements()->Add(for_of, zone());
+    }
+  }
+  // Now, rewind the original array literal to truncate everything from the
+  // first spread (included) until the end. This fixes $R's initialization.
+  lit->RewindSpreads();
+  return factory()->NewDoExpression(do_block, result, lit->position());
+}
+
+
 void ParserTraits::QueueDestructuringAssignmentForRewriting(Expression* expr) {
-  DCHECK(expr->IsRewritableAssignmentExpression());
+  DCHECK(expr->IsRewritableExpression());
   parser_->function_state_->AddDestructuringAssignment(
       Parser::DestructuringAssignment(expr, parser_->scope_));
 }
 
 
+void ParserTraits::QueueNonPatternForRewriting(Expression* expr) {
+  DCHECK(expr->IsRewritableExpression());
+  parser_->function_state_->AddNonPatternForRewriting(expr);
+}
+
+
 void ParserTraits::SetFunctionNameFromPropertyName(
     ObjectLiteralProperty* property, const AstRawString* name) {
   Expression* value = property->value();
-  if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
 
-  // TODO(adamk): Support computed names.
+  // Computed name setting must happen at runtime.
   if (property->is_computed_name()) return;
+
+  // Getter and setter names are handled here because their names
+  // change in ES2015, even though they are not anonymous.
+  auto function = value->AsFunctionLiteral();
+  if (function != nullptr) {
+    bool is_getter = property->kind() == ObjectLiteralProperty::GETTER;
+    bool is_setter = property->kind() == ObjectLiteralProperty::SETTER;
+    if (is_getter || is_setter) {
+      DCHECK_NOT_NULL(name);
+      const AstRawString* prefix =
+          is_getter ? parser_->ast_value_factory()->get_space_string()
+                    : parser_->ast_value_factory()->set_space_string();
+      function->set_raw_name(
+          parser_->ast_value_factory()->NewConsString(prefix, name));
+      return;
+    }
+  }
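+
+  // For example (informative),  let o = { get foo() {} }  gives the accessor
+  // the inferred name "get foo", observable via
+  // Object.getOwnPropertyDescriptor(o, "foo").get.name.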
+
+  if (!value->IsAnonymousFunctionDefinition()) return;
   DCHECK_NOT_NULL(name);
 
   // Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
   // of an object literal.
   if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
 
-  if (value->IsFunctionLiteral()) {
-    auto function = value->AsFunctionLiteral();
-    if (function->is_anonymous()) {
-      if (property->kind() == ObjectLiteralProperty::GETTER) {
-        function->set_raw_name(parser_->ast_value_factory()->NewConsString(
-            parser_->ast_value_factory()->get_space_string(), name));
-      } else if (property->kind() == ObjectLiteralProperty::SETTER) {
-        function->set_raw_name(parser_->ast_value_factory()->NewConsString(
-            parser_->ast_value_factory()->set_space_string(), name));
-      } else {
-        function->set_raw_name(name);
-        DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
-      }
-    }
+  if (function != nullptr) {
+    function->set_raw_name(name);
+    DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
   } else {
     DCHECK(value->IsClassLiteral());
     DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
-    auto class_literal = value->AsClassLiteral();
-    if (class_literal->raw_name() == nullptr) {
-      class_literal->set_raw_name(name);
-    }
+    value->AsClassLiteral()->constructor()->set_raw_name(name);
   }
 }
 
 
 void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
                                                     Expression* identifier) {
-  if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+  if (!value->IsAnonymousFunctionDefinition()) return;
   if (!identifier->IsVariableProxy()) return;
 
   auto name = identifier->AsVariableProxy()->raw_name();
   DCHECK_NOT_NULL(name);
 
-  if (value->IsFunctionLiteral()) {
-    auto function = value->AsFunctionLiteral();
-    if (function->is_anonymous()) {
-      function->set_raw_name(name);
-    }
+  auto function = value->AsFunctionLiteral();
+  if (function != nullptr) {
+    function->set_raw_name(name);
   } else {
     DCHECK(value->IsClassLiteral());
-    auto class_literal = value->AsClassLiteral();
-    if (class_literal->raw_name() == nullptr) {
-      class_literal->set_raw_name(name);
-    }
+    value->AsClassLiteral()->constructor()->set_raw_name(name);
   }
 }
 
 
+// Desugaring of yield*
+// ====================
+//
+// With the help of do-expressions and function.sent, we desugar yield* into a
+// loop containing a "raw" yield (a yield that doesn't wrap an iterator result
+// object around its argument).  Concretely, "yield* iterable" turns into
+// roughly the following code:
+//
+//   do {
+//     const kNext = 0;
+//     const kReturn = 1;
+//     const kThrow = 2;
+//
+//     let input = function.sent;
+//     let mode = kNext;
+//     let output = undefined;
+//
+//     let iterator = iterable[Symbol.iterator]();
+//     if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+//
+//     while (true) {
+//       // From the generator to the iterator:
+//       // Forward input according to resume mode and obtain output.
+//       switch (mode) {
+//         case kNext:
+//           output = iterator.next(input);
+//           if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//           break;
+//         case kReturn:
+//           IteratorClose(iterator, input, output);  // See below.
+//           break;
+//         case kThrow:
+//           let iteratorThrow = iterator.throw;
+//           if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
+//             IteratorClose(iterator);  // See below.
+//             throw MakeTypeError(kThrowMethodMissing);
+//           }
+//           output = %_Call(iteratorThrow, iterator, input);
+//           if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//           break;
+//       }
+//       if (output.done) break;
+//
+//       // From the generator to its user:
+//       // Forward output, receive new input, and determine resume mode.
+//       mode = kReturn;
+//       try {
+//         try {
+//           RawYield(output);  // See explanation above.
+//           mode = kNext;
+//         } catch (error) {
+//           mode = kThrow;
+//         }
+//       } finally {
+//         input = function.sent;
+//         continue;
+//       }
+//     }
+//
+//     output.value;
+//   }
+//
+// IteratorClose(iterator) expands to the following:
+//
+//   let iteratorReturn = iterator.return;
+//   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return;
+//   let output = %_Call(iteratorReturn, iterator);
+//   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//
+// IteratorClose(iterator, input, output) expands to the following:
+//
+//   let iteratorReturn = iterator.return;
+//   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+//   output = %_Call(iteratorReturn, iterator, input);
+//   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
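+//
+// As an informal illustration (not part of the desugaring itself), this
+// preserves e.g. the following observable behaviour:
+//
+//   function* inner() { yield 1; return 2; }
+//   function* outer() { let r = yield* inner(); yield r; }
+//   let g = outer();
+//   g.next();  // {value: 1, done: false}
+//   g.next();  // {value: 2, done: false}  (inner's return value, re-yielded)
+//   g.next();  // {value: undefined, done: true}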
+
+
+Expression* ParserTraits::RewriteYieldStar(
+    Expression* generator, Expression* iterable, int pos) {
+
+  const int nopos = RelocInfo::kNoPosition;
+
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto scope = parser_->scope_;
+  auto zone = parser_->zone();
+
+
+  // Forward definition for break/continue statements.
+  WhileStatement* loop = factory->NewWhileStatement(nullptr, nopos);
+
+
+  // let input = undefined;
+  Variable* var_input = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_input;
+  {
+    Expression* input_proxy = factory->NewVariableProxy(var_input);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, input_proxy, factory->NewUndefinedLiteral(nopos), nopos);
+    initialize_input = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // let mode = kNext;
+  Variable* var_mode = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_mode;
+  {
+    Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+    initialize_mode = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // let output = undefined;
+  Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_output;
+  {
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, output_proxy, factory->NewUndefinedLiteral(nopos),
+        nopos);
+    initialize_output = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // let iterator = iterable[Symbol.iterator]();
+  Variable* var_iterator = scope->NewTemporary(avfactory->empty_string());
+  Statement* get_iterator;
+  {
+    Expression* iterator = GetIterator(iterable, factory, nopos);
+    Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, iterator_proxy, iterator, nopos);
+    get_iterator = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+  Statement* validate_iterator;
+  {
+    Expression* is_receiver_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_iterator), zone);
+      is_receiver_call =
+          factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    }
+
+    Statement* throw_call;
+    {
+      Expression* call = NewThrowTypeError(
+          MessageTemplate::kSymbolIteratorInvalid, avfactory->empty_string(),
+          nopos);
+      throw_call = factory->NewExpressionStatement(call, nopos);
+    }
+
+    validate_iterator = factory->NewIfStatement(
+        is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+
+
+  // output = iterator.next(input);
+  Statement* call_next;
+  {
+    Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->next_string(), nopos);
+    Expression* next_property =
+        factory->NewProperty(iterator_proxy, literal, nopos);
+    Expression* input_proxy = factory->NewVariableProxy(var_input);
+    auto args = new (zone) ZoneList<Expression*>(1, zone);
+    args->Add(input_proxy, zone);
+    Expression* call = factory->NewCall(next_property, args, nopos);
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+    call_next = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+  Statement* validate_next_output;
+  {
+    Expression* is_receiver_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      is_receiver_call =
+          factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    }
+
+    Statement* throw_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      Expression* call = factory->NewCallRuntime(
+          Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+      throw_call = factory->NewExpressionStatement(call, nopos);
+    }
+
+    validate_next_output = factory->NewIfStatement(
+        is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+
+
+  // let iteratorThrow = iterator.throw;
+  Variable* var_throw = scope->NewTemporary(avfactory->empty_string());
+  Statement* get_throw;
+  {
+    Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->throw_string(), nopos);
+    Expression* property =
+        factory->NewProperty(iterator_proxy, literal, nopos);
+    Expression* throw_proxy = factory->NewVariableProxy(var_throw);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, throw_proxy, property, nopos);
+    get_throw = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
+  //   IteratorClose(iterator);
+  //   throw MakeTypeError(kThrowMethodMissing);
+  // }
+  Statement* check_throw;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ, factory->NewVariableProxy(var_throw),
+        factory->NewNullLiteral(nopos), nopos);
+
+    Expression* call = NewThrowTypeError(
+        MessageTemplate::kThrowMethodMissing,
+        avfactory->empty_string(), nopos);
+    Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+
+    Block* then = factory->NewBlock(nullptr, 4+1, false, nopos);
+    Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
+    BuildIteratorClose(
+        then->statements(), var_iterator, factory->NewUndefinedLiteral(nopos),
+        var_tmp);
+    then->statements()->Add(throw_call, zone);
+    check_throw = factory->NewIfStatement(
+        condition, then, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+
+  // output = %_Call(iteratorThrow, iterator, input);
+  Statement* call_throw;
+  {
+    auto args = new (zone) ZoneList<Expression*>(3, zone);
+    args->Add(factory->NewVariableProxy(var_throw), zone);
+    args->Add(factory->NewVariableProxy(var_iterator), zone);
+    args->Add(factory->NewVariableProxy(var_input), zone);
+    Expression* call =
+        factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
+    call_throw = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+  Statement* validate_throw_output;
+  {
+    Expression* is_receiver_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      is_receiver_call =
+          factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    }
+
+    Statement* throw_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      Expression* call = factory->NewCallRuntime(
+          Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+      throw_call = factory->NewExpressionStatement(call, nopos);
+    }
+
+    validate_throw_output = factory->NewIfStatement(
+        is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+
+
+  // if (output.done) break;
+  Statement* if_done;
+  {
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->done_string(), nopos);
+    Expression* property = factory->NewProperty(output_proxy, literal, nopos);
+    BreakStatement* break_loop = factory->NewBreakStatement(loop, nopos);
+    if_done = factory->NewIfStatement(
+        property, break_loop, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+
+  // mode = kReturn;
+  Statement* set_mode_return;
+  {
+    Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+    Expression* kreturn =
+        factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
+    set_mode_return = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // RawYield(output);
+  Statement* yield_output;
+  {
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Yield* yield = factory->NewYield(
+        generator, output_proxy, Yield::kInitial, nopos);
+    yield_output = factory->NewExpressionStatement(yield, nopos);
+  }
+
+
+  // mode = kNext;
+  Statement* set_mode_next;
+  {
+    Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+    set_mode_next = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // mode = kThrow;
+  Statement* set_mode_throw;
+  {
+    Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+    Expression* kthrow =
+        factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
+    set_mode_throw = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // input = function.sent;
+  Statement* get_input;
+  {
+    Expression* function_sent = FunctionSentExpression(scope, factory, nopos);
+    Expression* input_proxy = factory->NewVariableProxy(var_input);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, input_proxy, function_sent, nopos);
+    get_input = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+
+  // output.value;
+  Statement* get_value;
+  {
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->value_string(), nopos);
+    Expression* property = factory->NewProperty(output_proxy, literal, nopos);
+    get_value = factory->NewExpressionStatement(property, nopos);
+  }
+
+
+  // Now put things together.
+
+
+  // try { ... } catch(e) { ... }
+  Statement* try_catch;
+  {
+    Block* try_block = factory->NewBlock(nullptr, 2, false, nopos);
+    try_block->statements()->Add(yield_output, zone);
+    try_block->statements()->Add(set_mode_next, zone);
+
+    Block* catch_block = factory->NewBlock(nullptr, 1, false, nopos);
+    catch_block->statements()->Add(set_mode_throw, zone);
+
+    Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+    const AstRawString* name = avfactory->dot_catch_string();
+    Variable* catch_variable = catch_scope->DeclareLocal(
+        name, VAR, kCreatedInitialized, Variable::NORMAL);
+
+    try_catch = factory->NewTryCatchStatement(
+        try_block, catch_scope, catch_variable, catch_block, nopos);
+  }
+
+
+  // try { ... } finally { ... }
+  Statement* try_finally;
+  {
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(try_catch, zone);
+
+    Block* finally = factory->NewBlock(nullptr, 2, false, nopos);
+    finally->statements()->Add(get_input, zone);
+    finally->statements()->Add(
+        factory->NewContinueStatement(loop, nopos), zone);
+
+    try_finally = factory->NewTryFinallyStatement(try_block, finally, nopos);
+  }
+
+
+  // switch (mode) { ... }
+  SwitchStatement* switch_mode = factory->NewSwitchStatement(nullptr, nopos);
+  {
+    auto case_next = new (zone) ZoneList<Statement*>(3, zone);
+    case_next->Add(call_next, zone);
+    case_next->Add(validate_next_output, zone);
+    case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+    auto case_return = new (zone) ZoneList<Statement*>(5, zone);
+    BuildIteratorClose(case_return, var_iterator,
+                       factory->NewVariableProxy(var_input, nopos), var_output);
+    case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+    auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
+    case_throw->Add(get_throw, zone);
+    case_throw->Add(check_throw, zone);
+    case_throw->Add(call_throw, zone);
+    case_throw->Add(validate_throw_output, zone);
+    case_throw->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+    auto cases = new (zone) ZoneList<CaseClause*>(3, zone);
+    Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+    Expression* kreturn =
+        factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+    Expression* kthrow =
+        factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+    cases->Add(factory->NewCaseClause(knext, case_next, nopos), zone);
+    cases->Add(factory->NewCaseClause(kreturn, case_return, nopos), zone);
+    cases->Add(factory->NewCaseClause(kthrow, case_throw, nopos), zone);
+
+    switch_mode->Initialize(factory->NewVariableProxy(var_mode), cases);
+  }
+
+
+  // while (true) { ... }
+  // Already defined earlier: WhileStatement* loop = ...
+  {
+    Block* loop_body = factory->NewBlock(nullptr, 4, false, nopos);
+    loop_body->statements()->Add(switch_mode, zone);
+    loop_body->statements()->Add(if_done, zone);
+    loop_body->statements()->Add(set_mode_return, zone);
+    loop_body->statements()->Add(try_finally, zone);
+
+    loop->Initialize(factory->NewBooleanLiteral(true, nopos), loop_body);
+  }
+
+
+  // do { ... }
+  DoExpression* yield_star;
+  {
+    // The rewriter needs to process the get_value statement only, hence we
+    // put the preceding statements into an init block.
+
+    Block* do_block_ = factory->NewBlock(nullptr, 6, true, nopos);
+    do_block_->statements()->Add(initialize_input, zone);
+    do_block_->statements()->Add(initialize_mode, zone);
+    do_block_->statements()->Add(initialize_output, zone);
+    do_block_->statements()->Add(get_iterator, zone);
+    do_block_->statements()->Add(validate_iterator, zone);
+    do_block_->statements()->Add(loop, zone);
+
+    Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
+    do_block->statements()->Add(do_block_, zone);
+    do_block->statements()->Add(get_value, zone);
+
+    Variable* dot_result = scope->NewTemporary(avfactory->dot_result_string());
+    yield_star = factory->NewDoExpression(do_block, dot_result, nopos);
+    Rewriter::Rewrite(parser_, yield_star, avfactory);
+  }
+
+  return yield_star;
+}
+
+// Desugaring of (lhs) instanceof (rhs)
+// ====================================
+//
+// We desugar instanceof into a load of the @@hasInstance property on the
+// rhs, calling the resulting handler when one is found. We end up with
+// roughly the following code (O and C are fresh temporaries holding the
+// operands):
+//
+//   do {
+//     let O = lhs;
+//     let C = rhs;
+//     if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
+//     let handler_result = C[Symbol.hasInstance];
+//     if (handler_result === undefined) {
+//       if (!IS_CALLABLE(C)) {
+//         throw MakeTypeError(kCalledNonCallableInstanceOf);
+//       }
+//       handler_result = %ordinary_has_instance(C, O);
+//     } else {
+//       handler_result = !!(%_Call(handler_result, C, O));
+//     }
+//     handler_result;
+//   }
+//
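+// For instance (illustrative only), with a custom @@hasInstance handler:
+//
+//   class Even { static [Symbol.hasInstance](x) { return x % 2 === 0; } }
+//   4 instanceof Even  // true:  !!(%_Call(Even[Symbol.hasInstance], Even, 4))
+//   3 instanceof Even  // false
+//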
+Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
+                                            int pos) {
+  const int nopos = RelocInfo::kNoPosition;
+
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto scope = parser_->scope_;
+  auto zone = parser_->zone();
+
+  // let O = lhs;
+  Variable* var_O = scope->NewTemporary(avfactory->empty_string());
+  Statement* get_O;
+  {
+    Expression* O_proxy = factory->NewVariableProxy(var_O);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, O_proxy, lhs, nopos);
+    get_O = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // let C = rhs;
+  Variable* var_C = scope->NewTemporary(avfactory->empty_string());
+  Statement* get_C;
+  {
+    Expression* C_proxy = factory->NewVariableProxy(var_C);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, C_proxy, rhs, nopos);
+    get_C = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
+  Statement* validate_C;
+  {
+    auto args = new (zone) ZoneList<Expression*>(1, zone);
+    args->Add(factory->NewVariableProxy(var_C), zone);
+    Expression* is_receiver_call =
+        factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    Expression* call =
+        NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
+                          avfactory->empty_string(), nopos);
+    Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+
+    validate_C =
+        factory->NewIfStatement(is_receiver_call,
+                                factory->NewEmptyStatement(nopos),
+                                throw_call,
+                                nopos);
+  }
+
+  // let handler_result = C[Symbol.hasInstance];
+  Variable* var_handler_result = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_handler;
+  {
+    Expression* hasInstance_symbol_literal =
+        factory->NewSymbolLiteral("hasInstance_symbol", nopos);
+    Expression* prop = factory->NewProperty(factory->NewVariableProxy(var_C),
+                                            hasInstance_symbol_literal, pos);
+    Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
+    Expression* assignment =
+        factory->NewAssignment(Token::ASSIGN, handler_proxy, prop, nopos);
+    initialize_handler = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (handler_result === undefined) {
+  //   if (!IS_CALLABLE(C)) {
+  //     throw MakeTypeError(kCalledNonCallableInstanceOf);
+  //   }
+  //   handler_result = %ordinary_has_instance(C, O);
+  // } else {
+  //   handler_result = !!%_Call(handler_result, C, O);
+  // }
+  Statement* call_handler;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
+        factory->NewUndefinedLiteral(nopos), nopos);
+
+    Block* then_side = factory->NewBlock(nullptr, 2, false, nopos);
+    {
+      Expression* throw_expr =
+          NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
+                            avfactory->empty_string(), nopos);
+      Statement* validate_C = CheckCallable(var_C, throw_expr);
+      ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
+      args->Add(factory->NewVariableProxy(var_C), zone);
+      args->Add(factory->NewVariableProxy(var_O), zone);
+      CallRuntime* call = factory->NewCallRuntime(
+          Context::ORDINARY_HAS_INSTANCE_INDEX, args, pos);
+      Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
+      Expression* assignment =
+          factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
+      Statement* assignment_return =
+          factory->NewExpressionStatement(assignment, nopos);
+
+      then_side->statements()->Add(validate_C, zone);
+      then_side->statements()->Add(assignment_return, zone);
+    }
+
+    Statement* else_side;
+    {
+      auto args = new (zone) ZoneList<Expression*>(3, zone);
+      args->Add(factory->NewVariableProxy(var_handler_result), zone);
+      args->Add(factory->NewVariableProxy(var_C), zone);
+      args->Add(factory->NewVariableProxy(var_O), zone);
+      Expression* call =
+          factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+      Expression* inner_not =
+          factory->NewUnaryOperation(Token::NOT, call, nopos);
+      Expression* outer_not =
+          factory->NewUnaryOperation(Token::NOT, inner_not, nopos);
+      Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
+      Expression* assignment =
+          factory->NewAssignment(Token::ASSIGN, result_proxy, outer_not, nopos);
+
+      else_side = factory->NewExpressionStatement(assignment, nopos);
+    }
+    call_handler =
+        factory->NewIfStatement(condition, then_side, else_side, nopos);
+  }
+
+  // do { ... }
+  DoExpression* instanceof;
+  {
+    Block* block = factory->NewBlock(nullptr, 5, true, nopos);
+    block->statements()->Add(get_O, zone);
+    block->statements()->Add(get_C, zone);
+    block->statements()->Add(validate_C, zone);
+    block->statements()->Add(initialize_handler, zone);
+    block->statements()->Add(call_handler, zone);
+
+    // Here is the desugared instanceof.
+    instanceof = factory->NewDoExpression(block, var_handler_result, nopos);
+    Rewriter::Rewrite(parser_, instanceof, avfactory);
+  }
+
+  return instanceof;
+}
+
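+// Builds a statement of the form
+//
+//   if (!(typeof var === "function")) throw error;
+//
+// i.e. [error] is evaluated and thrown only if [var] is not callable.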
+Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  const int nopos = RelocInfo::kNoPosition;
+  Statement* validate_var;
+  {
+    Expression* type_of = factory->NewUnaryOperation(
+        Token::TYPEOF, factory->NewVariableProxy(var), nopos);
+    Expression* function_literal =
+        factory->NewStringLiteral(avfactory->function_string(), nopos);
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, type_of, function_literal, nopos);
+
+    Statement* throw_call = factory->NewExpressionStatement(error, nopos);
+
+    validate_var = factory->NewIfStatement(
+        condition, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+  return validate_var;
+}
+
+void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
+                                      Variable* iterator,
+                                      Expression* input,
+                                      Variable* var_output) {
+  //
+  // This function adds four statements to [statements], corresponding to the
+  // following code:
+  //
+  //   let iteratorReturn = iterator.return;
+  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+  //   output = %_Call(iteratorReturn, iterator);
+  //   if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+  //
+
+  const int nopos = RelocInfo::kNoPosition;
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto zone = parser_->zone();
+
+  // let iteratorReturn = iterator.return;
+  Variable* var_return = var_output;  // Reusing the output variable.
+  Statement* get_return;
+  {
+    Expression* iterator_proxy = factory->NewVariableProxy(iterator);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->return_string(), nopos);
+    Expression* property =
+        factory->NewProperty(iterator_proxy, literal, nopos);
+    Expression* return_proxy = factory->NewVariableProxy(var_return);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, return_proxy, property, nopos);
+    get_return = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+  Statement* check_return;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ, factory->NewVariableProxy(var_return),
+        factory->NewNullLiteral(nopos), nopos);
+
+    Statement* return_input = factory->NewReturnStatement(input, nopos);
+
+    check_return = factory->NewIfStatement(
+        condition, return_input, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+  // output = %_Call(iteratorReturn, iterator);
+  Statement* call_return;
+  {
+    auto args = new (zone) ZoneList<Expression*>(2, zone);
+    args->Add(factory->NewVariableProxy(var_return), zone);
+    args->Add(factory->NewVariableProxy(iterator), zone);
+
+    Expression* call =
+        factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, output_proxy, call, nopos);
+    call_return = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+  Statement* validate_output;
+  {
+    Expression* is_receiver_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      is_receiver_call =
+          factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    }
+
+    Statement* throw_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      Expression* call = factory->NewCallRuntime(
+          Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+      throw_call = factory->NewExpressionStatement(call, nopos);
+    }
+
+    validate_output = factory->NewIfStatement(
+        is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+
+  statements->Add(get_return, zone);
+  statements->Add(check_return, zone);
+  statements->Add(call_return, zone);
+  statements->Add(validate_output, zone);
+}
+
+
+// Runtime encoding of different completion modes.
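+//   BODY_COMPLETED: the loop body finished normally (or was never entered);
+//   BODY_ABORTED:   the body was exited early, e.g. by break or return;
+//   BODY_THREW:     the body threw an exception.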
+enum ForOfLoopBodyCompletion { BODY_COMPLETED, BODY_ABORTED, BODY_THREW };
+
+void ParserTraits::BuildIteratorCloseForCompletion(
+    ZoneList<Statement*>* statements, Variable* iterator,
+    Variable* completion) {
+  //
+  // This function adds two statements to [statements], corresponding to the
+  // following code:
+  //
+  //   let iteratorReturn = iterator.return;
+  //   if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+  //     let output;
+  //     if (completion === BODY_THREW) {
+  //       if (!IS_CALLABLE(iteratorReturn)) {
+  //         throw MakeTypeError(kReturnMethodNotCallable);
+  //       }
+  //       try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+  //     } else {
+  //       output = %_Call(iteratorReturn, iterator);
+  //     }
+  //     if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+  //   }
+  //
+
+  const int nopos = RelocInfo::kNoPosition;
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto scope = parser_->scope_;
+  auto zone = parser_->zone();
+
+  // let output;
+  Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+
+  // let iteratorReturn = iterator.return;
+  Variable* var_return = var_output;  // Reusing the output variable.
+  Statement* get_return;
+  {
+    Expression* iterator_proxy = factory->NewVariableProxy(iterator);
+    Expression* literal =
+        factory->NewStringLiteral(avfactory->return_string(), nopos);
+    Expression* property =
+        factory->NewProperty(iterator_proxy, literal, nopos);
+    Expression* return_proxy = factory->NewVariableProxy(var_return);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, return_proxy, property, nopos);
+    get_return = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (!IS_CALLABLE(iteratorReturn)) {
+  //   throw MakeTypeError(kReturnMethodNotCallable);
+  // }
+  Statement* check_return_callable;
+  {
+    Expression* throw_expr = NewThrowTypeError(
+        MessageTemplate::kReturnMethodNotCallable,
+        avfactory->empty_string(), nopos);
+    check_return_callable = CheckCallable(var_return, throw_expr);
+  }
+
+  // output = %_Call(iteratorReturn, iterator);
+  Statement* call_return;
+  {
+    auto args = new (zone) ZoneList<Expression*>(2, zone);
+    args->Add(factory->NewVariableProxy(var_return), zone);
+    args->Add(factory->NewVariableProxy(iterator), zone);
+    Expression* call =
+        factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+
+    Expression* output_proxy = factory->NewVariableProxy(var_output);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, output_proxy, call, nopos);
+    call_return = factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+  Statement* try_call_return;
+  {
+    auto args = new (zone) ZoneList<Expression*>(2, zone);
+    args->Add(factory->NewVariableProxy(var_return), zone);
+    args->Add(factory->NewVariableProxy(iterator), zone);
+
+    Expression* call =
+        factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
+
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(
+        factory->NewExpressionStatement(assignment, nopos), zone);
+
+    Block* catch_block = factory->NewBlock(nullptr, 0, false, nopos);
+
+    Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+    Variable* catch_variable = catch_scope->DeclareLocal(
+        avfactory->dot_catch_string(), VAR, kCreatedInitialized,
+        Variable::NORMAL);
+
+    try_call_return = factory->NewTryCatchStatement(
+        try_block, catch_scope, catch_variable, catch_block, nopos);
+  }
+
+  // if (completion === BODY_THREW) {
+  //   #check_return_callable;
+  //   #try_call_return;
+  // } else {
+  //   #call_return;
+  // }
+  Statement* call_return_carefully;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(completion),
+        factory->NewSmiLiteral(BODY_THREW, nopos), nopos);
+
+    Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
+    then_block->statements()->Add(check_return_callable, zone);
+    then_block->statements()->Add(try_call_return, zone);
+
+    call_return_carefully =
+        factory->NewIfStatement(condition, then_block, call_return, nopos);
+  }
+
+  // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+  Statement* validate_output;
+  {
+    Expression* is_receiver_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      is_receiver_call =
+          factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+    }
+
+    Statement* throw_call;
+    {
+      auto args = new (zone) ZoneList<Expression*>(1, zone);
+      args->Add(factory->NewVariableProxy(var_output), zone);
+      Expression* call = factory->NewCallRuntime(
+          Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+      throw_call = factory->NewExpressionStatement(call, nopos);
+    }
+
+    validate_output = factory->NewIfStatement(
+        is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+  }
+
+  // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
+  Statement* maybe_call_return;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ, factory->NewVariableProxy(var_return),
+        factory->NewNullLiteral(nopos), nopos);
+
+    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+    block->statements()->Add(call_return_carefully, zone);
+    block->statements()->Add(validate_output, zone);
+
+    maybe_call_return = factory->NewIfStatement(
+        condition, factory->NewEmptyStatement(nopos), block, nopos);
+  }
+
+
+  statements->Add(get_return, zone);
+  statements->Add(maybe_call_return, zone);
+}
+
+
+Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
+  if (!FLAG_harmony_iterator_close) return loop;
+
+  //
+  // This function replaces the loop with the following wrapping:
+  //
+  //   let completion = BODY_COMPLETED;
+  //   try {
+  //     #loop;
+  //   } catch(e) {
+  //     if (completion === BODY_ABORTED) completion = BODY_THREW;
+  //     throw e;
+  //   } finally {
+  //     if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
+  //       #BuildIteratorCloseForCompletion(#iterator, completion)
+  //     }
+  //   }
+  //
+  // where the loop's body is wrapped as follows:
+  //
+  //   {
+  //     {{completion = BODY_ABORTED;}}
+  //     #loop-body
+  //     {{completion = BODY_COMPLETED;}}
+  //   }
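+  //
+  // An illustrative consequence: a for-of loop that exits early now closes
+  // its iterator, e.g.
+  //
+  //   function* g() { try { yield 1; } finally { /* runs on break */ } }
+  //   for (let x of g()) break;  // invokes g's return(), running the finally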
+
+  const int nopos = RelocInfo::kNoPosition;
+  auto factory = parser_->factory();
+  auto avfactory = parser_->ast_value_factory();
+  auto scope = parser_->scope_;
+  auto zone = parser_->zone();
+
+  // let completion = BODY_COMPLETED;
+  Variable* var_completion = scope->NewTemporary(avfactory->empty_string());
+  Statement* initialize_completion;
+  {
+    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
+    initialize_completion =
+        factory->NewExpressionStatement(assignment, nopos);
+  }
+
+  // if (completion === BODY_ABORTED) completion = BODY_THREW;
+  Statement* set_completion_throw;
+  {
+    Expression* condition = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
+        factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
+
+    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_THREW, nopos),
+        nopos);
+    Statement* statement = factory->NewExpressionStatement(assignment, nopos);
+    set_completion_throw = factory->NewIfStatement(
+        condition, statement, factory->NewEmptyStatement(nopos), nopos);
+  }
+
+  // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
+  //   #BuildIteratorCloseForCompletion(#iterator, completion)
+  // }
+  Block* maybe_close;
+  {
+    Expression* condition1 = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
+        factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
+    Expression* condition2 = factory->NewCompareOperation(
+        Token::EQ_STRICT, factory->NewVariableProxy(loop->iterator()),
+        factory->NewUndefinedLiteral(nopos), nopos);
+    Expression* condition = factory->NewBinaryOperation(
+        Token::OR, condition1, condition2, nopos);
+
+    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+    BuildIteratorCloseForCompletion(
+        block->statements(), loop->iterator(), var_completion);
+    DCHECK(block->statements()->length() == 2);
+
+    maybe_close = factory->NewBlock(nullptr, 1, false, nopos);
+    maybe_close->statements()->Add(factory->NewIfStatement(
+        condition, factory->NewEmptyStatement(nopos), block, nopos), zone);
+  }
+
+  // try { #try_block }
+  // catch(e) {
+  //   #set_completion_throw;
+  //   throw e;
+  // }
+  Statement* try_catch;
+  {
+    Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+    Variable* catch_variable = catch_scope->DeclareLocal(
+        avfactory->dot_catch_string(), VAR, kCreatedInitialized,
+        Variable::NORMAL);
+
+    Statement* rethrow;
+    {
+      Expression* proxy = factory->NewVariableProxy(catch_variable);
+      rethrow = factory->NewExpressionStatement(
+          factory->NewThrow(proxy, nopos), nopos);
+    }
+
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(loop, zone);
+
+    Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
+    catch_block->statements()->Add(set_completion_throw, zone);
+    catch_block->statements()->Add(rethrow, zone);
+
+    try_catch = factory->NewTryCatchStatement(
+        try_block, catch_scope, catch_variable, catch_block, nopos);
+  }
+
+  // try { #try_catch } finally { #maybe_close }
+  Statement* try_finally;
+  {
+    Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+    try_block->statements()->Add(try_catch, zone);
+
+    try_finally =
+        factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
+  }
+
+  // #initialize_completion;
+  // #try_finally;
+  Statement* final_loop;
+  {
+    Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+    block->statements()->Add(initialize_completion, zone);
+    block->statements()->Add(try_finally, zone);
+    final_loop = block;
+  }
+
+  // {{completion = BODY_ABORTED;}}
+  Statement* set_completion_break;
+  {
+    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy,
+        factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
+
+    Block* block = factory->NewBlock(nullptr, 1, true, nopos);
+    block->statements()->Add(
+        factory->NewExpressionStatement(assignment, nopos), zone);
+    set_completion_break = block;
+  }
+
+  // {{completion = BODY_COMPLETED;}}
+  Statement* set_completion_normal;
+  {
+    Expression* proxy = factory->NewVariableProxy(var_completion);
+    Expression* assignment = factory->NewAssignment(
+        Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_COMPLETED, nopos),
+        nopos);
+
+    Block* block = factory->NewBlock(nullptr, 1, true, nopos);
+    block->statements()->Add(
+        factory->NewExpressionStatement(assignment, nopos), zone);
+    set_completion_normal = block;
+  }
+
+  // { #set_completion_break; #loop-body; #set_completion_normal }
+  Block* new_body = factory->NewBlock(nullptr, 3, false, nopos);
+  new_body->statements()->Add(set_completion_break, zone);
+  new_body->statements()->Add(loop->body(), zone);
+  new_body->statements()->Add(set_completion_normal, zone);
+
+  loop->set_body(new_body);
+  return final_loop;
+}
+
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index 7d50221..d4fb62f 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -335,6 +335,9 @@
 
     typedef v8::internal::AstProperties AstProperties;
 
+    typedef v8::internal::ExpressionClassifier<ParserTraits>
+        ExpressionClassifier;
+
     // Return types for traversing functions.
     typedef const AstRawString* Identifier;
     typedef v8::internal::Expression* Expression;
@@ -461,6 +464,8 @@
                             MessageTemplate::Template message,
                             const AstRawString* arg, int pos);
 
+  Statement* FinalizeForOfStatement(ForOfStatement* loop, int pos);
+
   // Reporting errors.
   void ReportMessageAt(Scanner::Location source_location,
                        MessageTemplate::Template message,
@@ -513,8 +518,8 @@
                                  int pos);
   Expression* NewTargetExpression(Scope* scope, AstNodeFactory* factory,
                                   int pos);
-  Expression* DefaultConstructor(bool call_super, Scope* scope, int pos,
-                                 int end_pos, LanguageMode language_mode);
+  Expression* FunctionSentExpression(Scope* scope, AstNodeFactory* factory,
+                                     int pos);
   Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
                                  AstNodeFactory* factory);
   Expression* ExpressionFromIdentifier(const AstRawString* name,
@@ -547,7 +552,7 @@
                                     int initializer_end_position, bool is_rest);
   V8_INLINE void DeclareFormalParameter(
       Scope* scope, const ParserFormalParameters::Parameter& parameter,
-      ExpressionClassifier* classifier);
+      Type::ExpressionClassifier* classifier);
   void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
                                           Expression* params,
                                           const Scanner::Location& params_loc,
@@ -567,7 +572,6 @@
       const AstRawString* name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_position, FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
       LanguageMode language_mode, bool* ok);
   V8_INLINE void SkipLazyFunctionBody(
       int* materialized_literal_count, int* expected_property_count, bool* ok,
@@ -642,6 +646,7 @@
 
   V8_INLINE void QueueDestructuringAssignmentForRewriting(
       Expression* assignment);
+  V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
 
   void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
                                        const AstRawString* name);
@@ -650,17 +655,28 @@
                                         Expression* identifier);
 
   // Rewrite expressions that are not used as patterns
-  V8_INLINE Expression* RewriteNonPattern(
-      Expression* expr, const ExpressionClassifier* classifier, bool* ok);
-  V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
-      ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
-      bool* ok);
-  V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
-      ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
-      bool* ok);
+  V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
+                                   bool* ok);
+
+  V8_INLINE Zone* zone() const;
+
+  V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
+
+  Expression* RewriteYieldStar(
+      Expression* generator, Expression* expression, int pos);
+
+  Expression* RewriteInstanceof(Expression* lhs, Expression* rhs, int pos);
 
  private:
   Parser* parser_;
+
+  void BuildIteratorClose(
+      ZoneList<Statement*>* statements, Variable* iterator,
+      Expression* input, Variable* output);
+  void BuildIteratorCloseForCompletion(
+      ZoneList<Statement*>* statements, Variable* iterator,
+      Variable* body_threw);
+  Statement* CheckCallable(Variable* var, Expression* error);
 };
 
 
@@ -744,6 +760,9 @@
                                    bool* ok);
   Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
                                       bool* ok);
+  Statement* ParseFunctionDeclaration(int pos, bool is_generator,
+                                      ZoneList<const AstRawString*>* names,
+                                      bool* ok);
   Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
                                    bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
@@ -754,6 +773,7 @@
                                 ZoneList<const AstRawString*>* names,
                                 bool* ok);
   DoExpression* ParseDoExpression(bool* ok);
+  Expression* ParseYieldStarExpression(bool* ok);
 
   struct DeclarationDescriptor {
     enum Kind { NORMAL, PARAMETER };
@@ -761,7 +781,6 @@
     Scope* scope;
     Scope* hoist_scope;
     VariableMode mode;
-    bool needs_init;
     int declaration_pos;
     int initialization_pos;
     Kind declaration_kind;
@@ -801,8 +820,9 @@
         const DeclarationParsingResult::Declaration* declaration,
         ZoneList<const AstRawString*>* names, bool* ok);
 
-    static void RewriteDestructuringAssignment(
-        Parser* parser, RewritableAssignmentExpression* expr, Scope* Scope);
+    static void RewriteDestructuringAssignment(Parser* parser,
+                                               RewritableExpression* expr,
+                                               Scope* Scope);
 
     static Expression* RewriteDestructuringAssignment(Parser* parser,
                                                       Assignment* assignment,
@@ -872,10 +892,10 @@
     bool* ok_;
   };
 
-
-  void ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                 DeclarationParsingResult* parsing_result,
-                                 bool* ok);
+  Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                   DeclarationParsingResult* parsing_result,
+                                   ZoneList<const AstRawString*>* names,
+                                   bool* ok);
   Statement* ParseExpressionOrLabelledStatement(
       ZoneList<const AstRawString*>* labels, bool* ok);
   IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
@@ -896,6 +916,8 @@
   Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
   Statement* ParseThrowStatement(bool* ok);
   Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
+  class DontCollectExpressionsInTailPositionScope;
+  class CollectExpressionsInTailPositionToListScope;
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
 
@@ -910,9 +932,9 @@
                                   Expression* subject, Statement* body,
                                   bool is_destructuring);
   Statement* DesugarLexicalBindingsInForStatement(
-      Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
-      ForStatement* loop, Statement* init, Expression* cond, Statement* next,
-      Statement* body, bool* ok);
+      Scope* inner_scope, VariableMode mode,
+      ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
+      Expression* cond, Statement* next, Statement* body, bool* ok);
 
   void RewriteDoExpression(Expression* expr, bool* ok);
 
@@ -920,7 +942,6 @@
       const AstRawString* name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_position, FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
       LanguageMode language_mode, bool* ok);
 
 
@@ -966,8 +987,9 @@
   Statement* BuildAssertIsCoercible(Variable* var);
 
   // Factory methods.
-  FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
-                                      int end_pos, LanguageMode language_mode);
+  FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
+                                      Scope* scope, int pos, int end_pos,
+                                      LanguageMode language_mode);
 
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
@@ -1013,14 +1035,10 @@
 
   V8_INLINE void RewriteDestructuringAssignments();
 
-  V8_INLINE Expression* RewriteNonPattern(
-      Expression* expr, const ExpressionClassifier* classifier, bool* ok);
-  V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
-      ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
-      bool* ok);
-  V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
-      ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
-      bool* ok);
+  friend class NonPatternRewriter;
+  V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
+
+  V8_INLINE void RewriteNonPattern(ExpressionClassifier* classifier, bool* ok);
 
   friend class InitializerRewriter;
   void RewriteParameterInitializer(Expression* expr, Scope* scope);
@@ -1171,7 +1189,7 @@
 
 void ParserTraits::DeclareFormalParameter(
     Scope* scope, const ParserFormalParameters::Parameter& parameter,
-    ExpressionClassifier* classifier) {
+    Type::ExpressionClassifier* classifier) {
   bool is_duplicate = false;
   bool is_simple = classifier->is_simple_parameter_list();
   auto name = is_simple || parameter.is_rest
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index 6e20282..768a948 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -33,7 +33,7 @@
 
 
 void Parser::PatternRewriter::RewriteDestructuringAssignment(
-    Parser* parser, RewritableAssignmentExpression* to_rewrite, Scope* scope) {
+    Parser* parser, RewritableExpression* to_rewrite, Scope* scope) {
   PatternRewriter rewriter;
 
   DCHECK(!to_rewrite->is_rewritten());
@@ -58,8 +58,7 @@
     Parser* parser, Assignment* assignment, Scope* scope) {
   DCHECK_NOT_NULL(assignment);
   DCHECK_EQ(Token::ASSIGN, assignment->op());
-  auto to_rewrite =
-      parser->factory()->NewRewritableAssignmentExpression(assignment);
+  auto to_rewrite = parser->factory()->NewRewritableExpression(assignment);
   RewriteDestructuringAssignment(parser, to_rewrite, scope);
   return to_rewrite->expression();
 }
@@ -78,7 +77,11 @@
 Parser::PatternRewriter::PatternContext
 Parser::PatternRewriter::SetAssignmentContextIfNeeded(Expression* node) {
   PatternContext old_context = context();
-  if (node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN) {
+  // AssignmentExpressions may occur in the Initializer position of a
+  // SingleNameBinding. Such expressions should not prompt a change in the
+  // pattern's context.
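+  // For example, in "let [a = b = 1] = []" the inner "b = 1" occurs in the
+  // initializer of the SingleNameBinding for "a" and must not switch the
+  // rewriter into ASSIGNMENT context.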
+  if (node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN &&
+      !IsInitializerContext()) {
     set_context(ASSIGNMENT);
   }
   return old_context;
@@ -91,8 +94,8 @@
   // AssignmentElement nodes
   PatternContext old_context = context();
   bool is_destructuring_assignment =
-      node->IsRewritableAssignmentExpression() &&
-      !node->AsRewritableAssignmentExpression()->is_rewritten();
+      node->IsRewritableExpression() &&
+      !node->AsRewritableExpression()->is_rewritten();
   bool is_assignment =
       node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN;
   if (is_destructuring_assignment || is_assignment) {
@@ -324,10 +327,11 @@
 }
 
 
-void Parser::PatternRewriter::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* node) {
-  if (!IsAssignmentContext()) {
-    // Mark the assignment as rewritten to prevent redundant rewriting, and
+void Parser::PatternRewriter::VisitRewritableExpression(
+    RewritableExpression* node) {
+  // If this is not a destructuring assignment...
+  if (!IsAssignmentContext() || !node->expression()->IsAssignment()) {
+    // Mark the node as rewritten to prevent redundant rewriting, and
     // perform BindingPattern rewriting
     DCHECK(!node->is_rewritten());
     node->Rewrite(node->expression());
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index 64511ac..d335c8b 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -94,11 +94,10 @@
     PreParserIdentifier name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
     int function_token_position, FunctionLiteral::FunctionType type,
-    FunctionLiteral::ArityRestriction arity_restriction,
     LanguageMode language_mode, bool* ok) {
   return pre_parser_->ParseFunctionLiteral(
       name, function_name_location, function_name_validity, kind,
-      function_token_position, type, arity_restriction, language_mode, ok);
+      function_token_position, type, language_mode, ok);
 }
 
 
@@ -451,8 +450,7 @@
                                           : kFunctionNameValidityUnknown,
                        is_generator ? FunctionKind::kGeneratorFunction
                                     : FunctionKind::kNormalFunction,
-                       pos, FunctionLiteral::kDeclaration,
-                       FunctionLiteral::kNormalArity, language_mode(),
+                       pos, FunctionLiteral::kDeclaration, language_mode(),
                        CHECK_OK);
   return Statement::FunctionDeclaration();
 }
@@ -575,7 +573,7 @@
     int decl_pos = peek_position();
     PreParserExpression pattern = PreParserExpression::Default();
     {
-      ExpressionClassifier pattern_classifier;
+      ExpressionClassifier pattern_classifier(this);
       Token::Value next = peek();
       pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
 
@@ -591,17 +589,12 @@
       }
     }
 
-    is_pattern = (pattern.IsObjectLiteral() || pattern.IsArrayLiteral()) &&
-                 !pattern.is_parenthesized();
-
-    bool is_for_iteration_variable =
-        var_context == kForStatement &&
-        (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
+    is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
 
     Scanner::Location variable_loc = scanner()->location();
     nvars++;
     if (Check(Token::ASSIGN)) {
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       ParseAssignmentExpression(var_context != kForStatement, &classifier,
                                 CHECK_OK);
       ValidateExpression(&classifier, CHECK_OK);
@@ -611,7 +604,7 @@
         *first_initializer_loc = variable_loc;
       }
     } else if ((require_initializer || is_pattern) &&
-               !is_for_iteration_variable) {
+               (var_context != kForStatement || !PeekInOrOf())) {
       PreParserTraits::ReportMessageAt(
           Scanner::Location(decl_pos, scanner()->location().end_pos),
           MessageTemplate::kDeclarationMissingInitializer,
@@ -655,7 +648,7 @@
           IsClassConstructor(function_state_->kind())) {
         bool is_this = peek() == Token::THIS;
         Expression expr = Expression::Default();
-        ExpressionClassifier classifier;
+        ExpressionClassifier classifier(this);
         if (is_this) {
           expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
         } else {
@@ -691,7 +684,7 @@
   }
 
   bool starts_with_identifier = peek_any_identifier();
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   Expression expr = ParseExpression(true, &classifier, CHECK_OK);
   ValidateExpression(&classifier, CHECK_OK);
 
@@ -924,40 +917,40 @@
       ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
                                 &is_binding_pattern, &first_initializer_loc,
                                 &bindings_loc, CHECK_OK);
-      bool accept_IN = decl_count >= 1;
-      if (accept_IN && CheckInOrOf(&mode, ok)) {
+      if (CheckInOrOf(&mode, ok)) {
         if (!*ok) return Statement::Default();
         if (decl_count != 1) {
-          const char* loop_type =
-              mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
           PreParserTraits::ReportMessageAt(
               bindings_loc, MessageTemplate::kForInOfLoopMultiBindings,
-              loop_type);
+              ForEachStatement::VisitModeString(mode));
           *ok = false;
           return Statement::Default();
         }
         if (first_initializer_loc.IsValid() &&
             (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
              is_lexical || is_binding_pattern)) {
-          if (mode == ForEachStatement::ITERATE) {
-            ReportMessageAt(first_initializer_loc,
-                            MessageTemplate::kForOfLoopInitializer);
-          } else {
-            // TODO(caitp): This should be an error in sloppy mode, too.
-            ReportMessageAt(first_initializer_loc,
-                            MessageTemplate::kForInLoopInitializer);
-          }
+          PreParserTraits::ReportMessageAt(
+              first_initializer_loc, MessageTemplate::kForInOfLoopInitializer,
+              ForEachStatement::VisitModeString(mode));
           *ok = false;
           return Statement::Default();
         }
-        ParseExpression(true, CHECK_OK);
+
+        if (mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          ParseAssignmentExpression(true, &classifier, CHECK_OK);
+          RewriteNonPattern(&classifier, CHECK_OK);
+        } else {
+          ParseExpression(true, CHECK_OK);
+        }
+
         Expect(Token::RPAREN, CHECK_OK);
         ParseSubStatement(CHECK_OK);
         return Statement::Default();
       }
     } else {
       int lhs_beg_pos = peek_position();
-      ExpressionClassifier classifier;
+      ExpressionClassifier classifier(this);
       Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
       int lhs_end_pos = scanner()->location().end_pos;
       is_let_identifier_expression =
@@ -980,7 +973,15 @@
               lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
               kSyntaxError, CHECK_OK);
         }
-        ParseExpression(true, CHECK_OK);
+
+        if (mode == ForEachStatement::ITERATE) {
+          ExpressionClassifier classifier(this);
+          ParseAssignmentExpression(true, &classifier, CHECK_OK);
+          RewriteNonPattern(&classifier, CHECK_OK);
+        } else {
+          ParseExpression(true, CHECK_OK);
+        }
+
         Expect(Token::RPAREN, CHECK_OK);
         ParseSubStatement(CHECK_OK);
         return Statement::Default();
@@ -1054,7 +1055,7 @@
   if (tok == Token::CATCH) {
     Consume(Token::CATCH);
     Expect(Token::LPAREN, CHECK_OK);
-    ExpressionClassifier pattern_classifier;
+    ExpressionClassifier pattern_classifier(this);
     ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
     ValidateBindingPattern(&pattern_classifier, CHECK_OK);
     Expect(Token::RPAREN, CHECK_OK);
@@ -1099,7 +1100,6 @@
     Identifier function_name, Scanner::Location function_name_location,
     FunctionNameValidity function_name_validity, FunctionKind kind,
     int function_token_pos, FunctionLiteral::FunctionType function_type,
-    FunctionLiteral::ArityRestriction arity_restriction,
     LanguageMode language_mode, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -1112,7 +1112,7 @@
   FunctionState function_state(&function_state_, &scope_, function_scope, kind,
                                &factory);
   DuplicateFinder duplicate_finder(scanner()->unicode_cache());
-  ExpressionClassifier formals_classifier(&duplicate_finder);
+  ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
   Expect(Token::LPAREN, CHECK_OK);
   int start_position = scanner()->location().beg_pos;
@@ -1122,8 +1122,7 @@
   Expect(Token::RPAREN, CHECK_OK);
   int formals_end_position = scanner()->location().end_pos;
 
-  CheckArityRestrictions(formals.arity, arity_restriction,
-                         formals.has_rest, start_position,
+  CheckArityRestrictions(formals.arity, kind, formals.has_rest, start_position,
                          formals_end_position, CHECK_OK);
 
   // See Parser::ParseFunctionLiteral for more information about lazy parsing
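
Note: with the ArityRestriction parameter removed from ParseFunctionLiteral, the
arity check above is now derived from the FunctionKind alone. A minimal sketch of
what such a kind-driven check presumably looks like (names illustrative, not
verbatim from this patch):

  // Sketch, assuming getters take no parameters and setters exactly one:
  void CheckArityFromKind(int arity, FunctionKind kind, bool has_rest, bool* ok) {
    if (IsGetterFunction(kind) && arity != 0) *ok = false;  // bad getter arity
    if (IsSetterFunction(kind) && (arity != 1 || has_rest)) {
      *ok = false;  // bad setter arity, or rest parameter in a setter
    }
  }
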
@@ -1219,7 +1218,7 @@
 
   bool has_extends = Check(Token::EXTENDS);
   if (has_extends) {
-    ExpressionClassifier classifier;
+    ExpressionClassifier classifier(this);
     ParseLeftHandSideExpression(&classifier, CHECK_OK);
     ValidateExpression(&classifier, CHECK_OK);
   }
@@ -1235,7 +1234,7 @@
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
     Identifier name;
-    ExpressionClassifier classifier;
+    ExpressionClassifier classifier(this);
     ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
                             &is_computed_name, &has_seen_constructor,
                             &classifier, &name, CHECK_OK);
@@ -1259,7 +1258,7 @@
   // Allow "eval" or "arguments" for backward compatibility.
   ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
   Scanner::Location spread_pos;
-  ExpressionClassifier classifier;
+  ExpressionClassifier classifier(this);
   ParseArguments(&spread_pos, &classifier, ok);
   ValidateExpression(&classifier, CHECK_OK);
 
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index 59100f1..253251c 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -279,12 +279,6 @@
   int position() const { return RelocInfo::kNoPosition; }
   void set_function_token_position(int position) {}
 
-  // Parenthesized expressions in the form `( Expression )`.
-  void set_is_parenthesized() {
-    code_ = ParenthesizedField::update(code_, true);
-  }
-  bool is_parenthesized() const { return ParenthesizedField::decode(code_); }
-
  private:
   enum Type {
     kExpression,
@@ -491,8 +485,7 @@
                                           PreParserExpression right, int pos) {
     return PreParserExpression::Default();
   }
-  PreParserExpression NewRewritableAssignmentExpression(
-      PreParserExpression expression) {
+  PreParserExpression NewRewritableExpression(PreParserExpression expression) {
     return expression;
   }
   PreParserExpression NewAssignment(Token::Value op,
@@ -550,7 +543,8 @@
     return PreParserExpression::Default();
   }
 
-  PreParserExpression NewSpread(PreParserExpression expression, int pos) {
+  PreParserExpression NewSpread(PreParserExpression expression, int pos,
+                                int expr_pos) {
     return PreParserExpression::Spread(expression);
   }
 
@@ -592,6 +586,9 @@
 
     typedef int AstProperties;
 
+    typedef v8::internal::ExpressionClassifier<PreParserTraits>
+        ExpressionClassifier;
+
     // Return types for traversing functions.
     typedef PreParserIdentifier Identifier;
     typedef PreParserExpression Expression;
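
Note: the typedef above reflects that ExpressionClassifier is now a class template
parameterized over the parser traits, which is why every construction site in this
patch gains a parser argument. The two shapes that appear throughout the hunks:

  ExpressionClassifier classifier(this);                              // ordinary
  ExpressionClassifier formals_classifier(this, &duplicate_finder);   // formals
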
@@ -797,8 +794,9 @@
     return PreParserExpression::Default();
   }
 
-  static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
-                                                int pos, int end_pos) {
+  static PreParserExpression FunctionSentExpression(Scope* scope,
+                                                    PreParserFactory* factory,
+                                                    int pos) {
     return PreParserExpression::Default();
   }
 
@@ -887,7 +885,7 @@
     ++parameters->arity;
   }
   void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
-                              ExpressionClassifier* classifier) {
+                              Type::ExpressionClassifier* classifier) {
     if (!classifier->is_simple_parameter_list()) {
       scope->SetHasNonSimpleParameters();
     }
@@ -902,7 +900,6 @@
       PreParserIdentifier name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_position, FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
       LanguageMode language_mode, bool* ok);
 
   PreParserExpression ParseClassLiteral(PreParserIdentifier name,
@@ -926,21 +923,24 @@
   inline void RewriteDestructuringAssignments() {}
 
   inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
+  inline void QueueNonPatternForRewriting(PreParserExpression) {}
 
   void SetFunctionNameFromPropertyName(PreParserExpression,
                                        PreParserIdentifier) {}
   void SetFunctionNameFromIdentifierRef(PreParserExpression,
                                         PreParserExpression) {}
 
-  inline PreParserExpression RewriteNonPattern(
-      PreParserExpression expr, const ExpressionClassifier* classifier,
-      bool* ok);
-  inline PreParserExpression RewriteNonPatternArguments(
-      PreParserExpression args, const ExpressionClassifier* classifier,
-      bool* ok);
-  inline PreParserExpression RewriteNonPatternObjectLiteralProperty(
-      PreParserExpression property, const ExpressionClassifier* classifier,
-      bool* ok);
+  inline void RewriteNonPattern(Type::ExpressionClassifier* classifier,
+                                bool* ok);
+
+  V8_INLINE Zone* zone() const;
+  V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
+
+  inline PreParserExpression RewriteYieldStar(
+      PreParserExpression generator, PreParserExpression expr, int pos);
+  inline PreParserExpression RewriteInstanceof(PreParserExpression lhs,
+                                               PreParserExpression rhs,
+                                               int pos);
 
  private:
   PreParser* pre_parser_;
@@ -1071,7 +1071,6 @@
       Identifier name, Scanner::Location function_name_location,
       FunctionNameValidity function_name_validity, FunctionKind kind,
       int function_token_pos, FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::ArityRestriction arity_restriction,
       LanguageMode language_mode, bool* ok);
   void ParseLazyFunctionLiteralBody(bool* ok,
                                     Scanner::BookmarkScope* bookmark = nullptr);
@@ -1123,30 +1122,34 @@
 }
 
 
-PreParserExpression PreParserTraits::RewriteNonPattern(
-    PreParserExpression expr, const ExpressionClassifier* classifier,
-    bool* ok) {
+void PreParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
+                                        bool* ok) {
   pre_parser_->ValidateExpression(classifier, ok);
-  return expr;
 }
 
 
-PreParserExpression PreParserTraits::RewriteNonPatternArguments(
-    PreParserExpression args, const ExpressionClassifier* classifier,
-    bool* ok) {
-  pre_parser_->ValidateExpression(classifier, ok);
-  return args;
+Zone* PreParserTraits::zone() const {
+  return pre_parser_->function_state_->scope()->zone();
 }
 
 
-PreParserExpression PreParserTraits::RewriteNonPatternObjectLiteralProperty(
-    PreParserExpression property, const ExpressionClassifier* classifier,
-    bool* ok) {
-  pre_parser_->ValidateExpression(classifier, ok);
-  return property;
+ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
+  return pre_parser_->function_state_->non_patterns_to_rewrite();
 }
 
 
+PreParserExpression PreParserTraits::RewriteYieldStar(
+    PreParserExpression generator, PreParserExpression expression, int pos) {
+  return pre_parser_->factory()->NewYield(
+      generator, expression, Yield::kDelegating, pos);
+}
+
+PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
+                                                       PreParserExpression rhs,
+                                                       int pos) {
+  return PreParserExpression::Default();
+}
+
 PreParserStatementList PreParser::ParseEagerFunctionBody(
     PreParserIdentifier function_name, int pos,
     const PreParserFormalParameters& parameters, FunctionKind kind,
diff --git a/src/parsing/rewriter.cc b/src/parsing/rewriter.cc
index 4da60ac..c8e8fed 100644
--- a/src/parsing/rewriter.cc
+++ b/src/parsing/rewriter.cc
@@ -31,6 +31,7 @@
         result_assigned_(false),
         replacement_(nullptr),
         is_set_(false),
+        zone_(ast_value_factory->zone()),
         scope_(scope),
         factory_(ast_value_factory) {
     InitializeAstVisitor(parser->stack_limit());
@@ -148,7 +149,7 @@
   is_set_ = is_set_ && set_in_then;
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
@@ -164,7 +165,7 @@
   is_set_ = is_set_ && set_after;
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
@@ -208,7 +209,7 @@
   is_set_ = is_set_ && set_in_try;
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
@@ -225,6 +226,7 @@
      // at the end again: ".backup = .result; ...; .result = .backup"
      // This is necessary because the finally block does not normally contribute
      // to the completion value.
+    CHECK(scope() != nullptr);
     Variable* backup = scope()->NewTemporary(
         factory()->ast_value_factory()->dot_result_string());
     Expression* backup_proxy = factory()->NewVariableProxy(backup);
@@ -245,7 +247,7 @@
   node->set_try_block(replacement_->AsBlock());
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
@@ -263,7 +265,7 @@
   is_set_ = is_set_ && set_after;
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
@@ -287,7 +289,7 @@
   node->set_statement(replacement_);
   replacement_ = node;
 
-  if (FLAG_harmony_completion && !is_set_) {
+  if (!is_set_) {
     is_set_ = true;
     replacement_ = AssignUndefinedBefore(node);
   }
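
Note: dropping the FLAG_harmony_completion guard makes the completion-value
rewrite unconditional: whenever a statement may complete without assigning
.result, the rewriter now always prepends an assignment of undefined. A sketch of
the effect, assuming the usual .result temporary:

  //   source:     if (c) { 42; }
  //   rewritten:  .result = undefined;         // AssignUndefinedBefore(node)
  //               if (c) { .result = 42; }
  // so eval("if (false) { 42; }") evaluates to undefined instead of leaving
  // the completion value unset.
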
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 7317593..2d5a579 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -39,7 +39,8 @@
 Scanner::Scanner(UnicodeCache* unicode_cache)
     : unicode_cache_(unicode_cache),
       bookmark_c0_(kNoBookmark),
-      octal_pos_(Location::invalid()) {
+      octal_pos_(Location::invalid()),
+      found_html_comment_(false) {
   bookmark_current_.literal_chars = &bookmark_current_literal_;
   bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
   bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -438,7 +439,10 @@
   Advance();
   if (c0_ == '-') {
     Advance();
-    if (c0_ == '-') return SkipSingleLineComment();
+    if (c0_ == '-') {
+      found_html_comment_ = true;
+      return SkipSingleLineComment();
+    }
     PushBack('-');  // undo Advance()
   }
   PushBack('!');  // undo Advance()
@@ -1206,7 +1210,9 @@
         (keyword_length <= 8 || input[8] == keyword[8]) &&          \
         (keyword_length <= 9 || input[9] == keyword[9])) {          \
       if (escaped) {                                                \
-        return token == Token::FUTURE_STRICT_RESERVED_WORD          \
+        /* TODO(adamk): YIELD should be handled specially. */       \
+        return (token == Token::FUTURE_STRICT_RESERVED_WORD ||      \
+                token == Token::LET || token == Token::STATIC)      \
                    ? Token::ESCAPED_STRICT_RESERVED_WORD            \
                    : Token::ESCAPED_KEYWORD;                        \
       }                                                             \
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 1d0aba0..3f9bbb5 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -448,6 +448,8 @@
 
   bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
 
+  bool FoundHtmlComment() const { return found_html_comment_; }
+
  private:
   // The current and look-ahead token.
   struct TokenDesc {
@@ -473,6 +475,7 @@
     current_.literal_chars = NULL;
     current_.raw_literal_chars = NULL;
     next_next_.token = Token::UNINITIALIZED;
+    found_html_comment_ = false;
   }
 
   // Support BookmarkScope functionality.
@@ -752,6 +755,9 @@
   // Whether there is a multi-line comment that contains a
   // line-terminator after the current token, and before the next.
   bool has_multiline_comment_before_next_;
+
+  // Whether this scanner encountered an HTML comment.
+  bool found_html_comment_;
 };
 
 }  // namespace internal
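
Note: the new found_html_comment_ flag records that the scanner skipped an
HTML-style comment opener ("<!--"), and FoundHtmlComment() exposes it, presumably
so callers such as the module parser can reject the construct. A minimal sketch of
how a caller might observe it (harness details assumed, not part of this patch):

  Scanner scanner(&unicode_cache);
  scanner.Initialize(&stream);             // stream over "<!-- hidden\nx"
  while (scanner.Next() != Token::EOS) {}  // "<!--" is consumed as a comment
  bool saw_html_comment = scanner.FoundHtmlComment();  // true
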
diff --git a/src/parsing/token.h b/src/parsing/token.h
index fee1f7e..7a62b4d 100644
--- a/src/parsing/token.h
+++ b/src/parsing/token.h
@@ -280,6 +280,22 @@
     }
   }
 
+  static bool EvalComparison(Value op, double op1, double op2) {
+    DCHECK(IsArithmeticCompareOp(op));
+    switch (op) {
+      case Token::EQ:
+      case Token::EQ_STRICT: return (op1 == op2);
+      case Token::NE: return (op1 != op2);
+      case Token::LT: return (op1 < op2);
+      case Token::GT: return (op1 > op2);
+      case Token::LTE: return (op1 <= op2);
+      case Token::GTE: return (op1 >= op2);
+      default:
+        UNREACHABLE();
+        return false;
+    }
+  }
+
   static bool IsBitOp(Value op) {
     return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
   }
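
Note: EvalComparison gives callers a single place to constant-fold an arithmetic
comparison once both operands are known doubles. It follows IEEE semantics only as
far as the C++ operators do: EQ and EQ_STRICT are collapsed, and any comparison
against NaN is false. A usage sketch:

  bool lt = Token::EvalComparison(Token::LT, 1.0, 2.0);   // true
  double nan = std::numeric_limits<double>::quiet_NaN();  // needs <limits>
  bool eq = Token::EvalComparison(Token::EQ, nan, nan);   // false: NaN != NaN
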
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index b384d3f..42e2208 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -202,8 +202,8 @@
                                    icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target));
   }
 }
 
@@ -248,9 +248,8 @@
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -329,39 +328,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  //
-  // The patched return sequence is defined by
-  // BreakLocation::SetDebugBreakAtReturn()
-  // FIXED_SEQUENCE
-
-  Instr instr0 = Assembler::instr_at(pc_);
-  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-#if V8_TARGET_ARCH_PPC64
-  Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
-  Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
-  Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
-#else
-  Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
-#endif
-  bool patched_return =
-      ((instr0 & kOpcodeMask) == ADDIS && (instr1 & kOpcodeMask) == ORI &&
-#if V8_TARGET_ARCH_PPC64
-       (instr3 & kOpcodeMask) == ORIS && (instr4 & kOpcodeMask) == ORI &&
-#endif
-       (binstr == 0x7d821008));  // twge r2, r2
-
-  // printf("IsPatchedReturnSequence: %d\n", patched_return);
-  return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  Instr current_instr = Assembler::instr_at(pc_);
-  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index 147fb59..aed149b 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -55,7 +55,7 @@
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
   supported_ |= CpuFeaturesImpliedByCompiler();
-  cache_line_size_ = 128;
+  icache_line_size_ = 128;
 
   // Only use statically determined features for cross compile (snapshot).
   if (cross_compile) return;
@@ -85,6 +85,9 @@
     // Assume support
     supported_ |= (1u << FPU);
   }
+  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
+    icache_line_size_ = cpu.icache_line_size();
+  }
 #elif V8_OS_AIX
   // Assume FP support and default cache line size
   supported_ |= (1u << FPU);
@@ -1504,14 +1507,14 @@
 // Code address skips the function descriptor "header".
 // TOC and static chain are ignored and set to 0.
 void Assembler::function_descriptor() {
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  Label instructions;
-  DCHECK(pc_offset() == 0);
-  emit_label_addr(&instructions);
-  dp(0);
-  dp(0);
-  bind(&instructions);
-#endif
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    Label instructions;
+    DCHECK(pc_offset() == 0);
+    emit_label_addr(&instructions);
+    dp(0);
+    dp(0);
+    bind(&instructions);
+  }
 }
 
 
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index e84d695..58c6c94 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -46,15 +46,24 @@
 #include "src/assembler.h"
 #include "src/ppc/constants-ppc.h"
 
-#define ABI_USES_FUNCTION_DESCRIPTORS \
-  (V8_HOST_ARCH_PPC && (V8_OS_AIX ||    \
-                      (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN)))
+#if V8_HOST_ARCH_PPC && \
+    (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define ABI_USES_FUNCTION_DESCRIPTORS 1
+#else
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+#endif
 
-#define ABI_PASSES_HANDLES_IN_REGS \
-  (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_PASSES_HANDLES_IN_REGS 1
+#else
+#define ABI_PASSES_HANDLES_IN_REGS 0
+#endif
 
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
-  (!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
+#else
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
+#endif
 
 #if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
 #define ABI_CALL_VIA_IP 1
@@ -63,9 +72,9 @@
 #endif
 
 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER Register::kCode_r2
+#define ABI_TOC_REGISTER 2
 #else
-#define ABI_TOC_REGISTER Register::kCode_r13
+#define ABI_TOC_REGISTER 13
 #endif
 
 #define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
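
Note: redefining these ABI macros as literal 0/1 values (rather than expressions
that may reference identifiers undefined on some hosts) lets the same symbol be
tested both by the preprocessor and by ordinary C++ code, which is what the
function_descriptor() change in assembler-ppc.cc above relies on:

  #if ABI_USES_FUNCTION_DESCRIPTORS
    // compile-time-only path still works
  #endif
    if (ABI_USES_FUNCTION_DESCRIPTORS) {
      // runtime-style test; the dead branch is folded away when the macro is 0
    }
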
@@ -247,7 +256,7 @@
 
 // Coprocessor register
 struct CRegister {
-  bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+  bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
   bool is(CRegister creg) const { return reg_code == creg.reg_code; }
   int code() const {
     DCHECK(is_valid());
@@ -273,14 +282,9 @@
 const CRegister cr5 = {5};
 const CRegister cr6 = {6};
 const CRegister cr7 = {7};
-const CRegister cr8 = {8};
-const CRegister cr9 = {9};
-const CRegister cr10 = {10};
-const CRegister cr11 = {11};
-const CRegister cr12 = {12};
-const CRegister cr13 = {13};
-const CRegister cr14 = {14};
-const CRegister cr15 = {15};
+
+// TODO(ppc): Define SIMD registers.
+typedef DoubleRegister Simd128Register;
 
 // -----------------------------------------------------------------------------
 // Machine instruction Operands
@@ -1203,7 +1207,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables.
diff --git a/src/ppc/builtins-ppc.cc b/src/ppc/builtins-ppc.cc
index 0476cd2..f0b76cc 100644
--- a/src/ppc/builtins-ppc.cc
+++ b/src/ppc/builtins-ppc.cc
@@ -136,6 +136,107 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- r3                 : number of arguments
+  //  -- lr                 : return address
+  //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- sp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in r4 and the double value in d1.
+  __ LoadRoot(r4, root_index);
+  __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+
+  // Setup state for loop
+  // r5: address of arg[0] + kPointerSize
+  // r6: number of slots to drop at exit (arguments + receiver)
+  __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+  __ add(r5, sp, r5);
+  __ addi(r6, r3, Operand(1));
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ cmpl(r5, sp);
+    __ ble(&done_loop);
+
+    // Load the next parameter tagged value into r3.
+    __ LoadPU(r3, MemOperand(r5, -kPointerSize));
+
+    // Load the double value of the parameter into d2, converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(r3, &convert_smi);
+    __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset));
+    __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r6);
+      __ Push(r4, r5, r6);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ Pop(r4, r5, r6);
+      __ SmiUntag(r6);
+      {
+        // Restore the double accumulator value (d1).
+        Label done_restore;
+        __ SmiToDouble(d1, r4);
+        __ JumpIfSmi(r4, &done_restore);
+        __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+        __ bind(&done_restore);
+      }
+    }
+    __ b(&convert);
+    __ bind(&convert_number);
+    __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset));
+    __ b(&done_convert);
+    __ bind(&convert_smi);
+    __ SmiToDouble(d2, r3);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the
+    // left-hand side (d1) and the next parameter value on the right-hand
+    // side (d2).
+    Label compare_nan, compare_swap;
+    __ fcmpu(d1, d2);
+    __ bunordered(&compare_nan);
+    __ b(cond_done, &loop);
+    __ b(CommuteCondition(cond_done), &compare_swap);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ TestDoubleIsMinusZero(reg, r7, r8);
+    __ bne(&loop);
+
+    // Update accumulator. Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ fmr(d1, d2);
+    __ mr(r4, r3);
+    __ b(&loop);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    // We still need to visit the rest of the arguments.
+    __ bind(&compare_nan);
+    __ LoadRoot(r4, Heap::kNanValueRootIndex);
+    __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+    __ b(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ mr(r3, r4);
+  __ Drop(r6);
+  __ Ret();
+}
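
Note: Generate_MathMaxMin emits the Math.max/Math.min loop directly: the
accumulator starts at the identity (-Infinity for max, +Infinity for min), NaN is
sticky but the remaining arguments are still visited for their ToNumber side
effects, and the -0/+0 tie is broken by sign. A scalar sketch of the same logic
for kind == kMax (plain C++, for illustration only):

  #include <cmath>
  #include <limits>
  #include <vector>
  double MaxSketch(const std::vector<double>& args) {
    double acc = -std::numeric_limits<double>::infinity();
    for (double arg : args) {
      if (std::isnan(acc) || std::isnan(arg)) {
        acc = std::numeric_limits<double>::quiet_NaN();  // NaN is sticky
        continue;
      }
      if (arg > acc || (arg == acc && std::signbit(acc))) acc = arg;  // -0 < +0
    }
    return acc;
  }
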
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r3                     : number of arguments
@@ -230,8 +331,9 @@
   __ bind(&new_object);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r5, r4, r6);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(r5);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(r5);
   }
   __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -359,8 +461,9 @@
   __ bind(&new_object);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r5, r4, r6);  // first argument, constructor, new target
-    __ CallRuntime(Runtime::kNewObject);
+    __ Push(r5);  // first argument
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(r5);
   }
   __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -368,24 +471,6 @@
 }
 
 
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- r4 : target function (preserved for callee)
-  //  -- r6 : new target (preserved for callee)
-  // -----------------------------------
-
-  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  // Push function as parameter to the runtime call.
-  __ Push(r4, r6, r4);
-
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ Pop(r4, r6);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
@@ -393,9 +478,29 @@
   __ JumpToJSEntry(ip);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argument count (preserved for callee)
+  //  -- r4 : target function (preserved for callee)
+  //  -- r6 : new target (preserved for callee)
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    // Push a copy of the target function and the new target.
+    // Push function as parameter to the runtime call.
+    __ SmiTag(r3);
+    __ Push(r3, r4, r6, r4);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ addi(ip, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ CallRuntime(function_id, 1);
+    __ mr(r5, r3);
+
+    // Restore target function and new target.
+    __ Pop(r3, r4, r6);
+    __ SmiUntag(r3);
+  }
+  __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ JumpToJSEntry(ip);
 }
 
@@ -411,8 +516,7 @@
   __ cmpl(sp, ip);
   __ bge(&ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -421,7 +525,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- r3     : number of arguments
   //  -- r4     : constructor function
@@ -448,142 +553,18 @@
       __ SmiTag(r3);
       __ Push(r5, r3);
 
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
-        __ bne(&rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        // r6: new target
-        __ LoadP(r5,
-                 FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
-        __ JumpIfSmi(r5, &rt_call);
-        __ CompareObjectType(r5, r8, r7, MAP_TYPE);
-        __ bne(&rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
-        __ cmp(r4, r8);
-        __ bne(&rt_call);
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // r4: constructor function
-        // r5: initial map
-        // r6: new target
-        __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
-        __ beq(&rt_call);
-
-        // Now allocate the JSObject on the heap.
-        // r4: constructor function
-        // r5: initial map
-        // r6: new target
-        __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-
-        __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
-
-        // Allocated the JSObject, now initialize the fields. Map is set to
-        // initial map and properties and elements are set to empty fixed array.
-        // r4: constructor function
-        // r5: initial map
-        // r6: new target
-        // r7: JSObject (not HeapObject tagged - the actual address).
-        // r10: start of next object
-        __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
-        __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
-        __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
-        __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
-        __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ addi(r7, r7, Operand(kHeapObjectTag));
-
-        // Fill all the in-object properties with the appropriate filler.
-        // r7: JSObject (tagged)
-        // r8: First in-object property of JSObject (not tagged)
-        __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
-          // Check if slack tracking is enabled.
-          __ lwz(r3, bit_field3);
-          __ DecodeField<Map::ConstructionCounter>(r11, r3);
-          // r11: slack tracking counter
-          __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
-          __ blt(&no_inobject_slack_tracking);
-          // Decrease generous allocation count.
-          __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
-          __ stw(r3, bit_field3);
-
-          // Allocate object with a slack.
-          __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
-          __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
-          __ sub(r3, r10, r3);
-          // r3: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ cmp(r8, r3);
-            __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-          __ InitializeFieldsWithFiller(r8, r3, r9);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(r8, r10, r9);
-
-          // r11: slack tracking counter value before decreasing.
-          __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
-          __ bne(&allocated);
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(r4, r6, r7, r5);
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ Pop(r4, r6, r7);
-
-          // Continue with JSObject being successfully allocated
-          // r4: constructor function
-          // r6: new target
-          // r7: JSObject
-          __ b(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(r8, r10, r9);
-
-        // Continue with JSObject being successfully allocated
-        // r4: constructor function
-        // r6: new target
-        // r7: JSObject
-        __ b(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // r4: constructor function
-      // r6: new target
-      __ bind(&rt_call);
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
-      __ Push(r4, r6, r4, r6);
-      __ CallRuntime(Runtime::kNewObject);
+      // Allocate the new receiver object.
+      __ Push(r4, r6);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
       __ mr(r7, r3);
       __ Pop(r4, r6);
 
-      // Receiver for constructor call allocated.
-      // r4: constructor function
-      // r6: new target
-      // r7: JSObject
-      __ bind(&allocated);
+      // ----------- S t a t e -------------
+      //  -- r4: constructor function
+      //  -- r6: new target
+      //  -- r7: newly allocated object
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
       __ LoadP(r3, MemOperand(sp));
@@ -680,6 +661,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi, indicating that the constructor result
+  // from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(r3, &dont_throw);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
   __ SmiToPtrArrayOffset(r4, r4);
   __ add(sp, sp, r4);
   __ addi(sp, sp, Operand(kPointerSize));
@@ -691,17 +685,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -846,10 +846,8 @@
 //   o sp: stack pointer
 //   o lr: return address
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-ppc.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -857,17 +855,23 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ PushFixedFrame(r4);
   __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-  __ push(r6);
-
-  // Push zero for bytecode array offset.
-  __ li(r3, Operand::Zero());
-  __ push(r3);
 
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into kInterpreterBytecodeRegister.
   __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  Label array_done;
+  Register debug_info = r5;
+  DCHECK(!debug_info.is(r3));
+  __ LoadP(debug_info,
+           FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+  // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+  __ beq(&array_done);
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ bind(&array_done);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -878,6 +882,10 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push new.target, bytecode array and zero for bytecode array offset.
+  __ li(r3, Operand::Zero());
+  __ Push(r6, kInterpreterBytecodeArrayRegister, r3);
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size (word) from the BytecodeArray object.
@@ -908,23 +916,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ LoadRoot(r0, Heap::kStackLimitRootIndex);
-    __ cmp(sp, r0);
-    __ bge(&ok);
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -932,10 +926,9 @@
           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Dispatch to the first bytecode handler for the function.
   __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -946,7 +939,9 @@
   // and header removal.
   __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Call(ip);
-  __ bkpt(0);  // Does not return here.
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
 }
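
Note: two behavioral points in this hunk: the bytecode array is taken from the
DebugInfo copy when one exists, and the dispatch table is now addressed off-heap
through an ExternalReference instead of a heap root, so there is no longer a
FixedArray header offset to add. Dispatch then reduces to an indexed load
(sketch; table layout assumed):

  // The table maps a bytecode to its handler; the entry point is the code
  // object plus the header adjustment seen above.
  Code* handler = dispatch_table[bytecode];
  Address entry = handler->address() + Code::kHeaderSize - kHeapObjectTag;
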
 
 
@@ -983,7 +978,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments (not including the receiver)
   //  -- r5 : the address of the first argument to be pushed. Subsequent
@@ -999,7 +995,9 @@
   Generate_InterpreterPushArgs(masm, r5, r6, r7);
 
   // Call the target.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -1028,45 +1026,24 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    // Save accumulator register and pass the deoptimization type to
-    // the runtime system.
-    __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
-    __ Push(kInterpreterAccumulatorRegister, r4);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-    __ pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts).
-  __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ addi(kInterpreterRegisterFileRegister, fp,
           Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(kInterpreterDispatchTableRegister,
+         Operand(ExternalReference::interpreter_dispatch_table_address(
+             masm->isolate())));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ LoadP(kContextRegister,
            MemOperand(kInterpreterRegisterFileRegister,
                       InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ LoadP(r4,
-           MemOperand(kInterpreterRegisterFileRegister,
-                      InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadP(kInterpreterBytecodeArrayRegister,
-           FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+  __ LoadP(
+      kInterpreterBytecodeArrayRegister,
+      MemOperand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -1094,6 +1071,29 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+    __ Push(r4);
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use these for interpreter deopts) and pop the
+  // accumulator value into the accumulator register.
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -1108,22 +1108,32 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in the interpreter
+  // entry trampoline. The return will never actually be taken, but our stack
+  // walker uses this address to determine whether a frame is interpreted.
+  __ mov(r0,
+         Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+  __ mtlr(r0);
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1346,13 +1356,12 @@
 
   // Load the next prototype.
   __ bind(&next_prototype);
-  __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
-  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
-  __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+  __ DecodeField<Map::HasHiddenPrototype>(scratch, SetRC);
   __ beq(receiver_check_failed, cr0);
+
+  __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ b(&prototype_loop_start);
 
@@ -1868,9 +1877,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ LoadP(r5, FieldMemOperand(
-                     r3, JSObject::kHeaderSize +
-                             Heap::kArgumentsLengthIndex * kPointerSize));
+    __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
     __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
     __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
     __ cmp(r5, ip);
@@ -1946,10 +1953,138 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg   <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ mov(scratch1, Operand(debug_is_active));
+  __ lbz(scratch1, MemOperand(scratch1));
+  __ cmpi(scratch1, Operand::Zero());
+  __ bne(&done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ bne(&no_interpreter_frame);
+    __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(scratch3,
+           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_arguments_adaptor);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mr(fp, scratch2);
+  __ LoadP(scratch1,
+           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ b(&formal_parameter_count_loaded);
+
+  __ bind(&no_arguments_adaptor);
+  // Load caller's formal parameter count
+  __ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(scratch1,
+           FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      scratch1, FieldMemOperand(
+                    scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+  __ SmiUntag(scratch1);
+#endif
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of destination area where we will put the arguments
+  // after we drop current frame. We add kPointerSize to count the receiver
+  // argument which is not included into formal parameters count.
+  Register dst_reg = scratch2;
+  __ ShiftLeftImm(dst_reg, scratch1, Operand(kPointerSizeLog2));
+  __ add(dst_reg, fp, dst_reg);
+  __ addi(dst_reg, dst_reg,
+          Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+  Register src_reg = scratch1;
+  __ ShiftLeftImm(src_reg, args_reg, Operand(kPointerSizeLog2));
+  __ add(src_reg, sp, src_reg);
+  // Count receiver argument as well (not included in args_reg).
+  __ addi(src_reg, src_reg, Operand(kPointerSize));
+
+  if (FLAG_debug_code) {
+    __ cmpl(src_reg, dst_reg);
+    __ Check(lt, kStackAccessBelowStackPointer);
+  }
+
+  // Restore caller's frame pointer and return address now as they will be
+  // overwritten by the copying loop.
+  __ RestoreFrameStateForTailCall();
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+
+  // Both src_reg and dst_reg are pointing to the word after the one to copy,
+  // so they must be pre-decremented in the loop.
+  Register tmp_reg = scratch3;
+  Label loop;
+  __ addi(tmp_reg, args_reg, Operand(1));  // +1 for receiver
+  __ mtctr(tmp_reg);
+  __ bind(&loop);
+  __ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
+  __ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+  __ bdnz(&loop);
+
+  // Leave current frame.
+  __ mr(sp, dst_reg);
+
+  __ bind(&done);
+}
+}  // namespace
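
Note: PrepareForTailCall implements the frame-dropping half of ES6 proper tail
calls: it collapses g()'s frame (and any arguments adaptor frame under it) and
slides f()'s already-pushed arguments up into the vacated space. The copy runs
back to front because the source and destination ranges can overlap; in plain
C++ the loop is equivalent to this sketch:

  // dst sits above src on the stack, so copy the highest word first
  // (argc arguments plus the receiver):
  for (int i = argc; i >= 0; --i) dst[i] = src[i];
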
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments (not including the receiver)
   //  -- r4 : the function to call (checked to be a JSFunction)
@@ -2034,6 +2169,10 @@
   //  -- cp : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
   __ LoadWordArith(
       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
 #if !V8_TARGET_ARCH_PPC64
@@ -2094,7 +2233,7 @@
       {
         FrameScope scope(masm, StackFrame::MANUAL);
         __ EnterFrame(StackFrame::INTERNAL);
-        __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+        __ CallRuntime(Runtime::kThrowStackOverflow);
       }
       __ bind(&done);
     }
@@ -2138,13 +2277,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments (not including the receiver)
   //  -- r4 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(r4);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
   // Patch the receiver to [[BoundThis]].
   __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
   __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
@@ -2165,7 +2309,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments (not including the receiver)
   //  -- r4 : the target to call (can be any Object).
@@ -2175,14 +2320,25 @@
   __ JumpIfSmi(r4, &non_callable);
   __ bind(&non_smi);
   __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
-  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+  __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
   __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
-  __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+  __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
           RelocInfo::CODE_TARGET, eq);
+
+  // Check if target has a [[Call]] internal method.
+  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+  __ TestBit(r7, Map::kIsCallable, r0);
+  __ beq(&non_callable, cr0);
+
   __ cmpi(r8, Operand(JS_PROXY_TYPE));
   __ bne(&non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, r3, r6, r7, r8);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ Push(r4);
   // Increase the arguments size to include the pushed function and the
@@ -2195,17 +2351,13 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
-  __ TestBit(r7, Map::kIsCallable, r0);
-  __ beq(&non_callable, cr0);
  // Overwrite the original receiver with the (original) target.
   __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
   __ StorePX(r4, MemOperand(sp, r8));
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index 26fbe98..03c73af 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -91,9 +91,8 @@
 
 #define __ ACCESS_MASM(masm)
 
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cond, Strength strength);
+                                          Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
                                     Register rhs, Label* lhs_not_nan,
                                     Label* slow, bool strict);
@@ -248,7 +247,7 @@
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
-                                          Condition cond, Strength strength) {
+                                          Condition cond) {
   Label not_identical;
   Label heap_number, return_equal;
   __ cmp(r3, r4);
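
The "almost reflexive" caveat above is exactly IEEE-754 NaN, the one value
that does not compare equal to itself; a one-line C++ illustration (not part
of the patch):

    #include <cassert>
    #include <cmath>

    int main() {
      double n = std::nan("");
      assert(!(n == n));  // NaN is the single exception to reflexivity of ==
    }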
@@ -268,14 +267,6 @@
     // Call runtime on identical SIMD values since we must throw a TypeError.
     __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
     __ beq(slow);
-    if (is_strong(strength)) {
-      // Call the runtime on anything that is converted in the semantics, since
-      // we need to throw a TypeError. Smis have already been ruled out.
-      __ cmpi(r7, Operand(HEAP_NUMBER_TYPE));
-      __ beq(&return_equal);
-      __ andi(r0, r7, Operand(kIsNotStringMask));
-      __ bne(slow, cr0);
-    }
   } else {
     __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
     __ beq(&heap_number);
@@ -289,13 +280,6 @@
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
       __ beq(slow);
-      if (is_strong(strength)) {
-        // Call the runtime on anything that is converted in the semantics,
-        // since we need to throw a TypeError. Smis and heap numbers have
-        // already been ruled out.
-        __ andi(r0, r7, Operand(kIsNotStringMask));
-        __ bne(slow, cr0);
-      }
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -515,40 +499,49 @@
 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                      Register lhs, Register rhs,
                                                      Label* possible_strings,
-                                                     Label* not_both_strings) {
+                                                     Label* runtime_call) {
   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
 
   // r5 is object type of rhs.
-  Label object_test;
+  Label object_test, return_unequal, undetectable;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ andi(r0, r5, Operand(kIsNotStringMask));
   __ bne(&object_test, cr0);
   __ andi(r0, r5, Operand(kIsNotInternalizedMask));
   __ bne(possible_strings, cr0);
   __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
-  __ bge(not_both_strings);
+  __ bge(runtime_call);
   __ andi(r0, r6, Operand(kIsNotInternalizedMask));
   __ bne(possible_strings, cr0);
 
-  // Both are internalized.  We already checked they weren't the same pointer
-  // so they are not equal.
-  __ li(r3, Operand(NOT_EQUAL));
+  // Both are internalized. We already checked they weren't the same pointer so
+  // they are not equal. Return non-equal by returning the non-zero object
+  // pointer in r3.
   __ Ret();
 
   __ bind(&object_test);
-  __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
-  __ blt(not_both_strings);
-  __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
-  __ blt(not_both_strings);
-  // If both objects are undetectable, they are equal. Otherwise, they
-  // are not equal, since they are different objects and an object is not
-  // equal to undefined.
+  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
   __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
-  __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
-  __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
-  __ and_(r3, r5, r6);
-  __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
-  __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
+  __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
+  __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
+  __ bne(&undetectable, cr0);
+  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+  __ bne(&return_unequal, cr0);
+
+  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
+  __ blt(runtime_call);
+  __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
+  __ blt(runtime_call);
+
+  __ bind(&return_unequal);
+  // Return non-equal by returning the non-zero object pointer in r3.
+  __ Ret();
+
+  __ bind(&undetectable);
+  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+  __ beq(&return_unequal, cr0);
+  __ li(r3, Operand(EQUAL));
   __ Ret();
 }
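
A hedged C++ sketch of the object_test decision table above (the helper shape
is illustrative, not V8 API): two JS receivers compare loosely equal only when
both are undetectable (the legacy document.all behaviour), a mixed pair is
unequal, and anything that is not a plain receiver is deferred to the runtime:

    enum class ObjectEq { kEqual, kUnequal, kRuntimeCall };

    ObjectEq LooseObjectEqual(bool lhs_undetectable, bool rhs_undetectable,
                              bool lhs_is_receiver, bool rhs_is_receiver) {
      if (lhs_undetectable) {
        // &undetectable: equal only if the other side is undetectable too.
        return rhs_undetectable ? ObjectEq::kEqual : ObjectEq::kUnequal;
      }
      if (rhs_undetectable) return ObjectEq::kUnequal;  // &return_unequal
      if (!lhs_is_receiver || !rhs_is_receiver) return ObjectEq::kRuntimeCall;
      return ObjectEq::kUnequal;  // distinct receivers are never loosely equal
    }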
 
@@ -600,7 +593,7 @@
 
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+  EmitIdenticalObjectComparison(masm, &slow, cc);
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
@@ -705,11 +698,19 @@
 
   __ bind(&slow);
 
-  __ Push(lhs, rhs);
-  // Figure out which native to call and setup the arguments.
   if (cc == eq) {
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(lhs, rhs);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+    __ sub(r3, r3, r4);
+    __ Ret();
   } else {
+    __ Push(lhs, rhs);
     int ncr;  // NaN compare result
     if (cc == lt || cc == le) {
       ncr = GREATER;
@@ -722,8 +723,7 @@
 
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
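
The eq branch above leans on STATIC_ASSERT(EQUAL == 0): the runtime call
leaves the canonical true or false heap object in r3, and subtracting the
"true" root turns that into zero exactly when the comparison held, which is
the encoding the stub's callers test against. A minimal sketch under that
assumption:

    #include <cstdint>

    // r3 holds either true_object or false_object after the runtime call;
    // the difference is 0 (EQUAL) iff the comparison result was true.
    std::intptr_t BooleanToComparisonResult(std::intptr_t result,
                                            std::intptr_t true_object) {
      return result - true_object;
    }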
 
   __ bind(&miss);
@@ -942,7 +942,6 @@
   __ ConvertIntToDouble(exponent, double_exponent);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -956,7 +955,6 @@
     __ stfd(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     DCHECK(heapnumber.is(r3));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret(2);
   } else {
     __ mflr(r0);
@@ -973,7 +971,6 @@
     __ MovFromFloatResult(double_result);
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret();
   }
 }
@@ -1055,14 +1052,13 @@
   // Need at least one extra slot for return address location.
   int arg_stack_space = 1;
 
-// PPC LINUX ABI:
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
   // Pass buffer for return value on stack if necessary
-  if (result_size() > 1) {
-    DCHECK_EQ(2, result_size());
-    arg_stack_space += 2;
+  bool needs_return_buffer =
+      result_size() > 2 ||
+      (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+  if (needs_return_buffer) {
+    arg_stack_space += result_size();
   }
-#endif
 
   __ EnterExitFrame(save_doubles(), arg_stack_space);
 
@@ -1076,9 +1072,8 @@
   // Result returned in registers or stack, depending on result size and ABI.
 
   Register isolate_reg = r5;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
-  if (result_size() > 1) {
-    // The return value is 16-byte non-scalar value.
+  if (needs_return_buffer) {
+    // The return value is a non-scalar value.
     // Use frame storage reserved by calling function to pass return
     // buffer as implicit first argument.
     __ mr(r5, r4);
@@ -1086,21 +1081,20 @@
     __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
     isolate_reg = r6;
   }
-#endif
 
   // Call C built-in.
   __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
 
   Register target = r15;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
-  // Native AIX/PPC64 Linux use a function descriptor.
-  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
-  __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
-  target = ip;
-#elif ABI_CALL_VIA_IP
-  __ Move(ip, r15);
-  target = ip;
-#endif
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    // AIX/PPC64BE Linux use a function descriptor.
+    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
+    __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
+    target = ip;
+  } else if (ABI_CALL_VIA_IP) {
+    __ Move(ip, r15);
+    target = ip;
+  }
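
On ABIs with function descriptors (AIX and big-endian PPC64 ELFv1 Linux), a
"function pointer" addresses a small descriptor rather than code, which is why
the block above loads the real entry point from offset 0 and the TOC pointer
from the following word before calling through ip. A sketch of the assumed
layout (three pointer-sized words is the conventional ELFv1 shape):

    struct FunctionDescriptor {
      void* entry;        // instruction address; loaded into ip above
      void* toc;          // TOC base; loaded into ABI_TOC_REGISTER above
      void* environment;  // not used by this call path
    };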
 
   // To let the GC traverse the return address of the exit frames, we need to
   // know where the return address is. The CEntryStub is unmovable, so
@@ -1112,13 +1106,12 @@
   __ Call(target);
   __ bind(&after_call);
 
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
   // If return value is on the stack, pop it to registers.
-  if (result_size() > 1) {
+  if (needs_return_buffer) {
+    if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
     __ LoadP(r4, MemOperand(r3, kPointerSize));
     __ LoadP(r3, MemOperand(r3));
   }
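
The needs_return_buffer handling mirrors how C compilers return large
aggregates: when a result does not fit in the ABI's return registers, the
caller reserves a buffer and passes its address as a hidden first argument,
then loads the fields back into registers afterwards. A hedged C++ sketch of
the convention being accommodated (the type is illustrative):

    struct ObjectTriple {
      void* x;
      void* y;
      void* z;
    };  // e.g. a result_size() == 3 runtime result

    // On such ABIs this is compiled roughly as
    //   void MakeTriple(ObjectTriple* hidden_ret, void*, void*, void*);
    // which is why the stub passes its reserved stack slots in r3 and pops
    // the fields back into r3/r4/r5 after the call.
    ObjectTriple MakeTriple(void* x, void* y, void* z) {
      ObjectTriple t = {x, y, z};
      return t;
    }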
-#endif
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -1132,9 +1125,9 @@
     ExternalReference pending_exception_address(
         Isolate::kPendingExceptionAddress, isolate());
 
-    __ mov(r5, Operand(pending_exception_address));
-    __ LoadP(r5, MemOperand(r5));
-    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+    __ mov(r6, Operand(pending_exception_address));
+    __ LoadP(r6, MemOperand(r6));
+    __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
    // Cannot use Check here, as it attempts to generate a call into the runtime.
     __ beq(&okay);
     __ stop("Unexpected pending exception");
@@ -1538,332 +1531,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The displacement is the offset of the last parameter (if any)
-  // relative to the frame pointer.
-  const int kDisplacement =
-      StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(r4, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
-  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
-  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&adaptor);
-
-  // Check index against formal parameters count limit passed in
-  // through register r3. Use unsigned comparison to get negative
-  // check for free.
-  __ cmpl(r4, r3);
-  __ bge(&slow);
-
-  // Read the argument from the stack and return it.
-  __ sub(r6, r3, r4);
-  __ SmiToPtrArrayOffset(r6, r6);
-  __ add(r6, fp, r6);
-  __ LoadP(r3, MemOperand(r6, kDisplacement));
-  __ blr();
-
-  // Arguments adaptor case: Check index against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmpl(r4, r3);
-  __ bge(&slow);
-
-  // Read the argument from the adaptor frame and return it.
-  __ sub(r6, r3, r4);
-  __ SmiToPtrArrayOffset(r6, r6);
-  __ add(r6, r5, r6);
-  __ LoadP(r3, MemOperand(r6, kDisplacement));
-  __ blr();
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ push(r4);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // r4 : function
-  // r5 : number of parameters (tagged)
-  // r6 : parameters pointer
-
-  DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&runtime);
-
-  // Patch the arguments.length and the parameters pointer in the current frame.
-  __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r6, r5);
-  __ add(r6, r6, r7);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ Push(r4, r6, r5);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // r4 : function
-  // r5 : number of parameters (tagged)
-  // r6 : parameters pointer
-  // Registers used over whole function:
-  // r8 : arguments count (tagged)
-  // r9 : mapped parameter count (tagged)
-
-  DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ mr(r8, r5);
-  __ mr(r9, r5);
-  __ b(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r6, r8);
-  __ add(r6, r6, r7);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r8 = argument count (tagged)
-  // r9 = parameter count (tagged)
-  // Compute the mapped parameter count = min(r5, r8) in r9.
-  __ cmp(r5, r8);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ isel(lt, r9, r5, r8);
-  } else {
-    Label skip;
-    __ mr(r9, r5);
-    __ blt(&skip);
-    __ mr(r9, r8);
-    __ bind(&skip);
-  }
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ SmiToPtrArrayOffset(r11, r9);
-    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
-    __ isel(eq, r11, r0, r11);
-  } else {
-    Label skip2, skip3;
-    __ bne(&skip2);
-    __ li(r11, Operand::Zero());
-    __ b(&skip3);
-    __ bind(&skip2);
-    __ SmiToPtrArrayOffset(r11, r9);
-    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
-    __ bind(&skip3);
-  }
-
-  // 2. Backing store.
-  __ SmiToPtrArrayOffset(r7, r8);
-  __ add(r11, r11, r7);
-  __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
-
-  // r3 = address of new object(s) (tagged)
-  // r5 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into r4.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ LoadP(r7, NativeContextMemOperand());
-  __ cmpi(r9, Operand::Zero());
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ LoadP(r11, MemOperand(r7, kNormalOffset));
-    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
-    __ isel(eq, r7, r11, r7);
-  } else {
-    Label skip4, skip5;
-    __ bne(&skip4);
-    __ LoadP(r7, MemOperand(r7, kNormalOffset));
-    __ b(&skip5);
-    __ bind(&skip4);
-    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
-    __ bind(&skip5);
-  }
-
-  // r3 = address of new object (tagged)
-  // r5 = argument count (smi-tagged)
-  // r7 = address of arguments map (tagged)
-  // r9 = mapped parameter count (tagged)
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
-  __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
-  __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ AssertNotSmi(r4);
-  const int kCalleeOffset =
-      JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
-  __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(r8);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  const int kLengthOffset =
-      JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
-  __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, r7 will point there, otherwise
-  // it will point to the backing store.
-  __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
-  // r3 = address of new object (tagged)
-  // r5 = argument count (tagged)
-  // r7 = address of parameter map or backing store (tagged)
-  // r9 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ isel(eq, r4, r7, r4);
-    __ beq(&skip_parameter_map);
-  } else {
-    Label skip6;
-    __ bne(&skip6);
-    // Move backing store address to r4, because it is
-    // expected there when filling in the unmapped arguments.
-    __ mr(r4, r7);
-    __ b(&skip_parameter_map);
-    __ bind(&skip6);
-  }
-
-  __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
-  __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
-  __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
-            r0);
-  __ SmiToPtrArrayOffset(r8, r9);
-  __ add(r8, r8, r7);
-  __ addi(r8, r8, Operand(kParameterMapHeaderSize));
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
-            r0);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop;
-  __ mr(r8, r9);
-  __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
-  __ sub(r11, r11, r9);
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ SmiToPtrArrayOffset(r4, r8);
-  __ add(r4, r4, r7);
-  __ addi(r4, r4, Operand(kParameterMapHeaderSize));
-
-  // r4 = address of backing store (tagged)
-  // r7 = address of parameter map (tagged)
-  // r8 = temporary scratch (a.o., for address calculation)
-  // r10 = temporary scratch (a.o., for address calculation)
-  // ip = the hole value
-  __ SmiUntag(r8);
-  __ mtctr(r8);
-  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
-  __ add(r10, r4, r8);
-  __ add(r8, r7, r8);
-  __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
-  __ bind(&parameters_loop);
-  __ StorePU(r11, MemOperand(r8, -kPointerSize));
-  __ StorePU(ip, MemOperand(r10, -kPointerSize));
-  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
-  __ bdnz(&parameters_loop);
-
-  // Restore r8 = argument count (tagged).
-  __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // r3 = address of new object (tagged)
-  // r4 = address of backing store (tagged)
-  // r8 = argument count (tagged)
-  // r9 = mapped parameter count (tagged)
-  // r11 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
-  __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
-  __ sub(r11, r8, r9, LeaveOE, SetRC);
-  __ Ret(eq, cr0);
-
-  Label arguments_loop;
-  __ SmiUntag(r11);
-  __ mtctr(r11);
-
-  __ SmiToPtrArrayOffset(r0, r9);
-  __ sub(r6, r6, r0);
-  __ add(r11, r4, r0);
-  __ addi(r11, r11,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
-  __ bind(&arguments_loop);
-  __ LoadPU(r7, MemOperand(r6, -kPointerSize));
-  __ StorePU(r7, MemOperand(r11, kPointerSize));
-  __ bdnz(&arguments_loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r8 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(r4, r6, r8);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is in lr.
   Label slow;
@@ -1887,117 +1554,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // r4 : function
-  // r5 : number of parameters (tagged)
-  // r6 : parameters pointer
-
-  DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&try_allocate);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r6, r5);
-  __ add(r6, r6, r7);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size
-  // of the arguments object and the elements array in words.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ SmiUntag(r11, r5, SetRC);
-  __ beq(&add_arguments_object, cr0);
-  __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
-  __ bind(&add_arguments_object);
-  __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(r11, r3, r7, r8, &runtime,
-              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
-  // Get the arguments boilerplate from the current native context.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
-
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
-  __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
-  __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(r5);
-  __ StoreP(r5,
-            FieldMemOperand(r3, JSObject::kHeaderSize +
-                                    Heap::kArgumentsLengthIndex * kPointerSize),
-            r0);
-
-  // If there are no actual arguments, we're done.
-  __ SmiUntag(r9, r5, SetRC);
-  __ Ret(eq, cr0);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-  __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
-  __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
-
-  // Copy the fixed array slots.
-  Label loop;
-  // Set up r7 to point just prior to the first array slot.
-  __ addi(r7, r7,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-  __ mtctr(r9);
-  __ bind(&loop);
-  // Pre-decrement r6 with kPointerSize on each iteration.
-  // Pre-decrement in order to skip receiver.
-  __ LoadPU(r8, MemOperand(r6, -kPointerSize));
-  // Pre-increment r7 with kPointerSize on each iteration.
-  __ StorePU(r8, MemOperand(r7, kPointerSize));
-  __ bdnz(&loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ Push(r4, r6, r5);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // r5 : number of parameters (tagged)
-  // r6 : parameters pointer
-  // r7 : rest parameter index (tagged)
-
-  Label runtime;
-  __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r0, r5);
-  __ add(r6, r8, r0);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ Push(r5, r6, r7);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 // Just jump directly to runtime if native RegExp is not selected at compile
 // time or if regexp entry in generated code is turned off by a runtime switch or
@@ -2085,48 +1641,49 @@
   __ LoadP(subject, MemOperand(sp, kSubjectOffset));
   __ JumpIfSmi(subject, &runtime);
   __ mr(r6, subject);  // Make a copy of the original subject string.
-  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
   // subject: subject string
   // r6: subject string
-  // r3: subject string instance type
   // regexp_data: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
-  // (1) Sequential string?  If yes, go to (5).
-  // (2) Anything but sequential or cons?  If yes, go to (6).
-  // (3) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (4) Is subject external?  If yes, go to (7).
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (1) Sequential string?  If yes, go to (4).
+  // (2) Sequential or cons?  If not, go to (5).
+  // (3) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (4) Sequential string.  Load regexp code according to encoding.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (6) Not a long external string?  If yes, go to (8).
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
-  //     Go to (5).
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (5) Long external string?  If not, go to (7).
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (4).
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
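
Read as straight-line pseudocode, the renumbered plan is a flattening loop:
cons and sliced wrappers are unwrapped and re-dispatched at (1) until the
subject is sequential, or can at least be made to look sequential. A hedged
C++ sketch with illustrative helpers (none of these are V8 API):

    struct String;
    bool IsSequential(String*);
    bool IsCons(String*);
    bool IsFlatCons(String*);
    bool IsLongExternal(String*);
    bool IsSliced(String*);
    String* ConsFirst(String*);
    String* SlicedParent(String*);
    String* AsSequentialView(String*);  // offset-wise sequential, step (6)

    String* FlattenForRegExp(String* subject, bool* bail_to_runtime) {
      *bail_to_runtime = false;
      while (!IsSequential(subject)) {              // (1)
        if (IsCons(subject)) {                      // (3)
          if (!IsFlatCons(subject)) {
            *bail_to_runtime = true;                // bail out to runtime
            return subject;
          }
          subject = ConsFirst(subject);             // back to (1)
        } else if (IsLongExternal(subject)) {       // (5)/(6)
          return AsSequentialView(subject);         // then treated as (4)
        } else if (IsSliced(subject)) {             // (8)
          subject = SlicedParent(subject);          // back to (1)
        } else {                                    // (7)
          *bail_to_runtime = true;
          return subject;
        }
      }
      return subject;                               // (4)
    }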
 
-  Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
-      not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
+  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
 
-  // (1) Sequential string?  If yes, go to (5).
+  __ bind(&check_underlying);
+  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+
+  // (1) Sequential string?  If yes, go to (4).
+
   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
                  kShortExternalStringMask) == 0x93);
   __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
                           kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
-  __ beq(&seq_string, cr0);  // Go to (5).
+  __ beq(&seq_string, cr0);  // Go to (4).
 
-  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (2) Sequential or cons? If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   STATIC_ASSERT(kExternalStringTag < 0xffffu);
   __ cmpi(r4, Operand(kExternalStringTag));
-  __ bge(&not_seq_nor_cons);  // Go to (6).
+  __ bge(&not_seq_nor_cons);  // Go to (5).
 
   // (3) Cons string.  Check that it's flat.
   // Replace subject with first string and reload instance type.
@@ -2134,20 +1691,9 @@
   __ CompareRoot(r3, Heap::kempty_stringRootIndex);
   __ bne(&runtime);
   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  __ b(&check_underlying);
 
-  // (4) Is subject external?  If yes, go to (7).
-  __ bind(&check_underlying);
-  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  STATIC_ASSERT(kStringRepresentationMask == 3);
-  __ andi(r0, r3, Operand(kStringRepresentationMask));
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ bne(&external_string, cr0);  // Go to (7).
-
-  // (5) Sequential string.  Load regexp code according to encoding.
+  // (4) Sequential string.  Load regexp code according to encoding.
   __ bind(&seq_string);
   // subject: sequential subject string (or look-alike, external string)
   // r6: original subject string
@@ -2255,16 +1801,6 @@
   // Locate the code entry and call it.
   __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-
-#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
-  // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
-  // RegExp routine.  Extract the instruction address here since
-  // DirectCEntryStub::GenerateCall will not do it for calls out to
-  // what it thinks is C code compiled for the simulator/host
-  // platform.
-  __ LoadP(code, MemOperand(code, 0));  // Instruction address
-#endif
-
   DirectCEntryStub stub(isolate());
   stub.GenerateCall(masm, code);
 
@@ -2390,12 +1926,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (6) Not a long external string?  If yes, go to (8).
+  // (5) Long external string? If not, go to (7).
   __ bind(&not_seq_nor_cons);
   // Compare flags are still set.
-  __ bgt(&not_long_external);  // Go to (8).
+  __ bgt(&not_long_external);  // Go to (7).
 
-  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (6) External string.  Make it, offset-wise, look like a sequential string.
   __ bind(&external_string);
   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
@@ -2412,15 +1948,15 @@
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ subi(subject, subject,
           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ b(&seq_string);  // Go to (5).
+  __ b(&seq_string);  // Go to (4).
 
-  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (7) Short external string or not a string?  If yes, bail out to runtime.
   __ bind(&not_long_external);
   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
   __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ bne(&runtime, cr0);
 
-  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced string.  Replace subject with parent.  Go to (1).
   // Load offset into r11 and replace subject string with parent.
   __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(r11);
@@ -2658,7 +2194,8 @@
 
   __ bind(&call_function);
   __ mov(r3, Operand(argc));
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -2696,7 +2233,7 @@
 
   __ bind(&call);
   __ mov(r3, Operand(argc));
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -3246,6 +2783,37 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in r3.
+  Label is_number;
+  __ JumpIfSmi(r3, &is_number);
+
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
+  // r3: receiver
+  // r4: receiver instance type
+  __ Ret(le);
+
+  Label not_heap_number;
+  __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+  __ bne(&not_heap_number);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ cmpi(r4, Operand(ODDBALL_TYPE));
+  __ bne(&not_oddball);
+  __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ push(r3);  // Push argument.
+  __ TailCallRuntime(Runtime::kToName);
+}
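
The stub encodes the ToName fast paths by instance type: names (strings and
symbols) are returned as-is, numbers defer to NumberToString, oddballs return
their cached string, and everything else falls through to the full runtime. A
compact C++ sketch of that dispatch (types and helpers are illustrative, not
V8 API):

    enum class Kind { kName, kNumber, kOddball, kOther };
    struct Value;
    Kind KindOf(Value*);
    Value* NumberToString(Value*);   // NumberToStringStub analogue
    Value* OddballToString(Value*);  // Oddball::kToStringOffset analogue
    Value* RuntimeToName(Value*);    // Runtime::kToName analogue

    Value* ToName(Value* v) {
      switch (KindOf(v)) {
        case Kind::kName:    return v;                   // __ Ret(le)
        case Kind::kNumber:  return NumberToString(v);   // &is_number
        case Kind::kOddball: return OddballToString(v);  // cached string
        default:             return RuntimeToName(v);    // slow path
      }
    }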
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3438,18 +3006,14 @@
 
   __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
   __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
-      __ AssertSmi(r4);
-      __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
-      __ AssertSmi(r3);
-    }
-    __ sub(r3, r4, r3);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
+    __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
+    __ AssertSmi(r4);
+    __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+    __ AssertSmi(r3);
   }
+  __ sub(r3, r4, r3);
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3547,7 +3111,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3770,8 +3334,6 @@
   if (Token::IsEqualityOp(op())) {
     __ sub(r3, r3, r4);
     __ Ret();
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     if (op() == Token::LT || op() == Token::LTE) {
       __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3820,15 +3382,15 @@
 
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
-  // Native AIX/PPC64 Linux use a function descriptor.
-  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
-  __ LoadP(ip, MemOperand(target, 0));  // Instruction address
-#else
-  // ip needs to be set for DirectCEentryStub::Generate, and also
-  // for ABI_CALL_VIA_IP.
-  __ Move(ip, target);
-#endif
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    // AIX/PPC64BE Linux use a function descriptor.
+    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+    __ LoadP(ip, MemOperand(target, 0));  // Instruction address
+  } else {
+    // ip needs to be set for DirectCEntryStub::Generate, and also
+    // for ABI_CALL_VIA_IP.
+    __ Move(ip, target);
+  }
 
   intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
   __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
@@ -4142,9 +3704,8 @@
     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                            regs_.scratch0(), &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(), regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -4791,34 +4352,32 @@
 #if !defined(USE_SIMULATOR)
   uintptr_t entry_hook =
       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+#else
+  // Under the simulator we need to indirect the entry hook through a
+  // trampoline function at a known address.
+  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+  ExternalReference entry_hook = ExternalReference(
+      &dispatcher, ExternalReference::BUILTIN_CALL, isolate());
+
+  // It additionally takes an isolate as a third parameter
+  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+#endif
+
   __ mov(ip, Operand(entry_hook));
 
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  // Function descriptor
-  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
-  __ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_CALL_VIA_IP
-// ip set above, so nothing to do.
-#endif
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+    __ LoadP(ip, MemOperand(ip, 0));
+  }
+  // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
 
   // PPC LINUX ABI:
   __ li(r0, Operand::Zero());
   __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
-#else
-  // Under the simulator we need to indirect the entry hook through a
-  // trampoline function at a known address.
-  // It additionally takes an isolate as a third parameter
-  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
 
-  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
-  __ mov(ip, Operand(ExternalReference(
-                 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
-#endif
   __ Call(ip);
 
-#if !defined(USE_SIMULATOR)
   __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
-#endif
 
   // Restore the stack pointer if needed.
   if (frame_alignment > kPointerSize) {
@@ -5143,6 +4702,633 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : target
+  //  -- r6 : new target
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r4);
+  __ AssertReceiver(r6);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
+  __ bne(&new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(r5, &new_object);
+  __ CompareObjectType(r5, r3, r3, MAP_TYPE);
+  __ bne(&new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+  __ cmp(r3, r4);
+  __ bne(&new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+  __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset));
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset));
+  __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset));
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ addi(r4, r3, Operand(JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- r3 : result (untagged)
+  //  -- r4 : result fields (untagged)
+  //  -- r8 : result end (untagged)
+  //  -- r5 : initial map
+  //  -- cp : context
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+  __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+  __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
+  __ bne(&slack_tracking, cr0);
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(r4, r8, r9);
+
+    // Add the object tag to make the JSObject real.
+    __ addi(r3, r3, Operand(kHeapObjectTag));
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
+    __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+
+    // Initialize the in-object fields with undefined.
+    __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+    __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
+    __ sub(r7, r8, r7);
+    __ InitializeFieldsWithFiller(r4, r7, r9);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(r4, r8, r9);
+
+    // Add the object tag to make the JSObject real.
+    __ addi(r3, r3, Operand(kHeapObjectTag));
+
+    // Check if we can finalize the instance size.
+    __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
+    __ Ret(ne);
+
+    // Finalize the instance size.
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r3, r5);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(r3);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    STATIC_ASSERT(kSmiTag == 0);
+    __ ShiftLeftImm(r7, r7,
+                    Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+    __ Push(r5, r7);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(r5);
+  }
+  __ subi(r3, r3, Operand(kHeapObjectTag));
+  __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+  __ add(r8, r3, r8);
+  __ b(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ Push(r4, r6);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
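
The slack_tracking path above is V8's in-object slack tracking: objects are
allocated with generous room for in-object properties, a small counter in the
map's bit field 3 is decremented on each fast-path allocation, and the unused
tail of each instance is filled with the one-pointer filler map so the heap
stays iterable until the instance size is finalized and shrunk. A minimal
model, assuming the counter counts down to kSlackTrackingCounterEnd and that 0
means tracking is off (the STATIC_ASSERT above pins kNoSlackTracking to 0):

    struct SlackTrackingMap {
      static const int kCounterEnd = 1;  // assumed kSlackTrackingCounterEnd
      int construction_counter;          // Map::ConstructionCounter analogue

      void OnFastPathAllocation() {
        if (construction_counter == 0) return;  // kNoSlackTracking
        int before = construction_counter--;
        // The stub compares the pre-decrement value, so the last tracked
        // allocation triggers Runtime::kFinalizeInstanceSize.
        if (before == kCounterEnd) FinalizeInstanceSize();
      }

      void FinalizeInstanceSize();
    };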
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r5 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mr(r5, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+    __ cmp(ip, r4);
+    __ bne(&loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&no_rest_parameters);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(r4);
+#endif
+  __ sub(r3, r3, r4, LeaveOE, SetRC);
+  __ bgt(&rest_parameters, cr0);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Set up the rest parameter array in r3.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+    __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
+    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
+    __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
+    __ li(r4, Operand::Zero());
+    __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ b(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ SmiToPtrArrayOffset(r9, r3);
+    __ add(r5, r5, r9);
+    __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // ----------- S t a t e -------------
+    //  -- cp : context
+    //  -- r3 : number of rest parameters (tagged)
+    //  -- r5 : pointer just past the first rest parameter
+    //  -- r9 : size of rest parameters
+    //  -- lr : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ add(r4, r4, r9);
+    __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the elements array in r6.
+    __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+    __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+    __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+    __ addi(r7, r6,
+            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+    {
+      Label loop;
+      __ SmiUntag(r0, r3);
+      __ mtctr(r0);
+      __ bind(&loop);
+      __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+      __ StorePU(ip, MemOperand(r7, kPointerSize));
+      __ bdnz(&loop);
+      __ addi(r7, r7, Operand(kPointerSize));
+    }
+
+    // Setup the rest parameter array in r7.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+    __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
+    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+    __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
+    __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
+    __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ addi(r3, r7, Operand(kHeapObjectTag));
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r4);
+      __ Push(r3, r5, r4);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ mr(r6, r3);
+      __ Pop(r3, r5);
+    }
+    __ b(&done_allocate);
+  }
+}
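
The arithmetic above reduces to: the rest parameter count is the number of
actual arguments recorded in the adaptor frame minus the function's formal
parameter count, clamped at zero (the bgt only takes the rest_parameters path
for a positive difference). As plain C++:

    int RestParameterCount(int actual_arguments, int formal_parameters) {
      int diff = actual_arguments - formal_parameters;
      return diff > 0 ? diff : 0;  // non-positive => empty rest array
    }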
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
+  __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(r5);
+#endif
+  __ SmiToPtrArrayOffset(r6, r5);
+  __ add(r6, fp, r6);
+  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r4 : function
+  // r5 : number of parameters (tagged)
+  // r6 : parameters pointer
+  // Registers used over whole function:
+  // r8 : arguments count (tagged)
+  // r9 : mapped parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ mr(r8, r5);
+  __ mr(r9, r5);
+  __ b(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiToPtrArrayOffset(r6, r8);
+  __ add(r6, r6, r7);
+  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // r8 = argument count (tagged)
+  // r9 = parameter count (tagged)
+  // Compute the mapped parameter count = min(r5, r8) in r9.
+  __ cmp(r5, r8);
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ isel(lt, r9, r5, r8);
+  } else {
+    Label skip;
+    __ mr(r9, r5);
+    __ blt(&skip);
+    __ mr(r9, r8);
+    __ bind(&skip);
+  }
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ SmiToPtrArrayOffset(r11, r9);
+    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+    __ isel(eq, r11, r0, r11);
+  } else {
+    Label skip2, skip3;
+    __ bne(&skip2);
+    __ li(r11, Operand::Zero());
+    __ b(&skip3);
+    __ bind(&skip2);
+    __ SmiToPtrArrayOffset(r11, r9);
+    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+    __ bind(&skip3);
+  }
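Both arms above compute the same thing: r11 = (mapped count == 0) ? 0 : mapped count in pointer units plus kParameterMapHeaderSize. On cores with ISELECT the stub emits a branch-free integer select; older cores get the two-label ladder. A rough standalone C++ analogue of the select (the constants are stand-ins, not the real V8 values):

#include <cstdint>

constexpr intptr_t kPointerSize = sizeof(void*);
// Assumed layout: FixedArray header (map + length) plus the two extra words.
constexpr intptr_t kParameterMapHeaderSize = 4 * kPointerSize;

intptr_t ParameterMapSize(intptr_t mapped_count) {
  intptr_t size = mapped_count * kPointerSize + kParameterMapHeaderSize;
  // isel(eq, r11, r0, r11): pick 0 (in r0) when the compare set EQ,
  // with no branch; otherwise keep the computed size.
  return mapped_count == 0 ? 0 : size;
}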
+
+  // 2. Backing store.
+  __ SmiToPtrArrayOffset(r7, r8);
+  __ add(r11, r11, r7);
+  __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
+
+  // r3 = address of new object(s) (tagged)
+  // r5 = parameter count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into r4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ LoadP(r7, NativeContextMemOperand());
+  __ cmpi(r9, Operand::Zero());
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ LoadP(r11, MemOperand(r7, kNormalOffset));
+    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+    __ isel(eq, r7, r11, r7);
+  } else {
+    Label skip4, skip5;
+    __ bne(&skip4);
+    __ LoadP(r7, MemOperand(r7, kNormalOffset));
+    __ b(&skip5);
+    __ bind(&skip4);
+    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+    __ bind(&skip5);
+  }
+
+  // r3 = address of new object (tagged)
+  // r5 = parameter count (smi-tagged)
+  // r7 = address of arguments map (tagged)
+  // r9 = mapped parameter count (tagged)
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+  __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(r4);
+  __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
+            r0);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(r8);
+  __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
+            r0);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, r7 will point there, otherwise
+  // it will point to the backing store.
+  __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  // r3 = address of new object (tagged)
+  // r5 = parameter count (tagged)
+  // r7 = address of parameter map or backing store (tagged)
+  // r9 = mapped parameter count (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ isel(eq, r4, r7, r4);
+    __ beq(&skip_parameter_map);
+  } else {
+    Label skip6;
+    __ bne(&skip6);
+    // Move backing store address to r4, because it is
+    // expected there when filling in the unmapped arguments.
+    __ mr(r4, r7);
+    __ b(&skip_parameter_map);
+    __ bind(&skip6);
+  }
+
+  __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+  __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
+  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+  __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
+            r0);
+  __ SmiToPtrArrayOffset(r8, r9);
+  __ add(r8, r8, r7);
+  __ addi(r8, r8, Operand(kParameterMapHeaderSize));
+  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+            r0);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop;
+  __ mr(r8, r9);
+  __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+  __ sub(r11, r11, r9);
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ SmiToPtrArrayOffset(r4, r8);
+  __ add(r4, r4, r7);
+  __ addi(r4, r4, Operand(kParameterMapHeaderSize));
+
+  // r4 = address of backing store (tagged)
+  // r7 = address of parameter map (tagged)
+  // r8 = temporary scratch (used, among others, for address calculation)
+  // r10 = temporary scratch (used, among others, for address calculation)
+  // ip = the hole value
+  __ SmiUntag(r8);
+  __ mtctr(r8);
+  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+  __ add(r10, r4, r8);
+  __ add(r8, r7, r8);
+  __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+
+  __ bind(&parameters_loop);
+  __ StorePU(r11, MemOperand(r8, -kPointerSize));
+  __ StorePU(ip, MemOperand(r10, -kPointerSize));
+  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+  __ bdnz(&parameters_loop);
+
+  // Restore r8 = argument count (tagged).
+  __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));
+
+  __ bind(&skip_parameter_map);
+  // r3 = address of new object (tagged)
+  // r4 = address of backing store (tagged)
+  // r8 = argument count (tagged)
+  // r9 = mapped parameter count (tagged)
+  // r11 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
+  __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
+  __ sub(r11, r8, r9, LeaveOE, SetRC);
+  __ Ret(eq, cr0);
+
+  Label arguments_loop;
+  __ SmiUntag(r11);
+  __ mtctr(r11);
+
+  __ SmiToPtrArrayOffset(r0, r9);
+  __ sub(r6, r6, r0);
+  __ add(r11, r4, r0);
+  __ addi(r11, r11,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+  __ bind(&arguments_loop);
+  __ LoadPU(r7, MemOperand(r6, -kPointerSize));
+  __ StorePU(r7, MemOperand(r11, kPointerSize));
+  __ bdnz(&arguments_loop);
+
+  // Return.
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // r8 = argument count (tagged)
+  __ bind(&runtime);
+  __ Push(r4, r6, r8);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
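The single Allocate call above sizes three contiguous objects at once: the JSSloppyArgumentsObject, an optional parameter map, and the FixedArray backing store. A back-of-envelope C++ version of steps 1-3 (the constants are assumptions standing in for the real V8 values):

#include <cstdint>

constexpr intptr_t kPointerSize = sizeof(void*);
constexpr intptr_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr intptr_t kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;  // + context + backing store
constexpr intptr_t kSloppyArgumentsObjectSize =
    5 * kPointerSize;  // assumed: map, properties, elements, length, callee

intptr_t TotalAllocationSize(intptr_t mapped_count, intptr_t argument_count) {
  // 1. Parameter map (skipped entirely when nothing is mapped).
  intptr_t map_size =
      mapped_count == 0
          ? 0
          : mapped_count * kPointerSize + kParameterMapHeaderSize;
  // 2. Backing store.  3. Arguments object.
  return map_size + argument_count * kPointerSize + kFixedArrayHeaderSize +
         kSloppyArgumentsObjectSize;
}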
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4 : function
+  //  -- cp : context
+  //  -- fp : frame pointer
+  //  -- lr : return address
+  // -----------------------------------
+  __ AssertFunction(r4);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make r5 point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mr(r5, fp);
+    __ b(&loop_entry);
+    __ bind(&loop);
+    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+    __ cmp(ip, r4);
+    __ bne(&loop);
+  }
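The loop above works because a standard frame's marker slot holds either a Smi frame-type marker (handler and stub frames) or the JSFunction itself (JavaScript frames), so comparing that slot against r4 finds the function's own frame. Roughly, in C++ (a hedged model, not real frame code):

#include <cstdint>

// Hypothetical standard-frame view: caller_fp links frames; marker holds a
// Smi marker for stub frames or the JSFunction for JavaScript frames.
struct Frame {
  Frame* caller_fp;
  void* marker;
};

Frame* FindJavaScriptFrame(Frame* fp, const void* js_function) {
  Frame* frame = fp;
  while (frame->marker != js_function) {  // cmp(ip, r4); bne(&loop)
    frame = frame->caller_fp;             // LoadP(kCallerFPOffset)
  }
  return frame;
}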
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&arguments_adaptor);
+  {
+    __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadWordArith(
+        r3,
+        FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+    __ SmiTag(r3);
+#endif
+    __ SmiToPtrArrayOffset(r9, r3);
+    __ add(r5, r5, r9);
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ SmiToPtrArrayOffset(r9, r3);
+    __ add(r5, r6, r9);
+  }
+  __ bind(&arguments_done);
+  __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // ----------- S t a t e -------------
+  //  -- cp : context
+  //  -- r3 : number of arguments (tagged)
+  //  -- r5 : pointer just past first argument
+  //  -- r9 : size of arguments
+  //  -- lr : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ add(r4, r4, r9);
+  __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Set up the elements array in r6.
+  __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+  __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+  __ addi(r7, r6,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+  {
+    Label loop, done_loop;
+    __ SmiUntag(r0, r3, SetRC);
+    __ beq(&done_loop, cr0);
+    __ mtctr(r0);
+    __ bind(&loop);
+    __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+    __ StorePU(ip, MemOperand(r7, kPointerSize));
+    __ bdnz(&loop);
+    __ bind(&done_loop);
+    __ addi(r7, r7, Operand(kPointerSize));
+  }
+
+  // Set up the strict arguments object in r7.
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
+  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
+  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
+  __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
+  __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ addi(r3, r7, Operand(kHeapObjectTag));
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(r4);
+    __ Push(r3, r5, r4);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ mr(r6, r3);
+    __ Pop(r3, r5);
+  }
+  __ b(&done_allocate);
+}
 
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context = cp;
@@ -5446,11 +5632,10 @@
   __ b(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- r3                  : callee
   //  -- r7                  : call_data
@@ -5482,12 +5667,14 @@
   STATIC_ASSERT(FCA::kHolderIndex == 0);
   STATIC_ASSERT(FCA::kArgsLength == 7);
 
-  DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+  DCHECK(argc.is_immediate() || r6.is(argc.reg()));
 
   // context save
   __ push(context);
-  // load context from callee
-  __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // load context from callee
+    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+  }
 
   // callee
   __ push(callee);
@@ -5586,7 +5773,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5594,24 +5781,32 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- sp[0]                  : name
-  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- sp[0]                        : name
+  //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- r5                     : api_function_address
+  //  -- r5                           : api_function_address
   // -----------------------------------
 
   Register api_function_address = ApiGetterDescriptor::function_address();
+  int arg0Slot = 0;
+  int accessorInfoSlot = 0;
+  int apiStackSpace = 0;
   DCHECK(api_function_address.is(r5));
 
-  __ mr(r3, sp);                               // r0 = Handle<Name>
-  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = PCA
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+  __ mr(r3, sp);                               // r3 = Handle<Name>
+  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = v8::PCI::args_
 
 // If ABI passes Handles (pointer-sized struct) in a register:
 //
@@ -5625,37 +5820,38 @@
 //    [0] space for DirectCEntryStub's LR save
 //    [1] copy of Handle (first arg)
 //    [2] AccessorInfo&
-#if ABI_PASSES_HANDLES_IN_REGS
-  const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
-  const int kApiStackSpace = 2;
-#else
-  const int kArg0Slot = kStackFrameExtraParamSlot + 1;
-  const int kAccessorInfoSlot = kArg0Slot + 1;
-  const int kApiStackSpace = 3;
-#endif
+  if (ABI_PASSES_HANDLES_IN_REGS) {
+    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+    apiStackSpace = 2;
+  } else {
+    arg0Slot = kStackFrameExtraParamSlot + 1;
+    accessorInfoSlot = arg0Slot + 1;
+    apiStackSpace = 3;
+  }
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(false, kApiStackSpace);
+  __ EnterExitFrame(false, apiStackSpace);
 
-#if !ABI_PASSES_HANDLES_IN_REGS
-  // pass 1st arg by reference
-  __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
-  __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
-#endif
+  if (!ABI_PASSES_HANDLES_IN_REGS) {
+    // pass 1st arg by reference
+    __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
+    __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
+  }
 
-  // Create PropertyAccessorInfo instance on the stack above the exit frame with
-  // r4 (internal::Object** args_) as the data.
-  __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
-  // r4 = AccessorInfo&
-  __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
-
-  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+  __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
+  __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
+  // r4 = v8::PropertyCallbackInfo&
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
+
+  // +3 is to skip the frame prologue, the return address and the name handle.
+  MemOperand return_value_operand(
+      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           kStackUnwindSpace, NULL,
-                           MemOperand(fp, 6 * kPointerSize), NULL);
+                           kStackUnwindSpace, NULL, return_value_operand, NULL);
 }
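The hard-coded MemOperand(fp, 6 * kPointerSize) is replaced by an expression derived from kReturnValueOffset, which survives layout changes in PropertyCallbackArguments. A quick consistency check, under the assumption that the constant is 3 (its real value lives in V8 and is not shown here):

// kAssumedReturnValueOffset is an assumption for illustration; the real
// constant is PropertyCallbackArguments::kReturnValueOffset inside V8.
constexpr int kAssumedReturnValueOffset = 3;
constexpr int kSkippedSlots = 3;  // prologue, return address, name handle
static_assert(kAssumedReturnValueOffset + kSkippedSlots == 6,
              "agrees with the old hard-coded fp + 6 * kPointerSize operand");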
 
 
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index 2bf8b4e..d6d86b0 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -58,9 +58,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
 
   Assembler::FlushICache(isolate, buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
@@ -96,9 +94,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+  DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
 
   Assembler::FlushICache(isolate, buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
diff --git a/src/ppc/cpu-ppc.cc b/src/ppc/cpu-ppc.cc
index a42fa53..91ea400 100644
--- a/src/ppc/cpu-ppc.cc
+++ b/src/ppc/cpu-ppc.cc
@@ -25,7 +25,7 @@
     return;
   }
 
-  const int kCacheLineSize = CpuFeatures::cache_line_size();
+  const int kCacheLineSize = CpuFeatures::icache_line_size();
   intptr_t mask = kCacheLineSize - 1;
   byte *start =
       reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
diff --git a/src/ppc/deoptimizer-ppc.cc b/src/ppc/deoptimizer-ppc.cc
index 4232342..9ec5cdd 100644
--- a/src/ppc/deoptimizer-ppc.cc
+++ b/src/ppc/deoptimizer-ppc.cc
@@ -88,31 +88,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-  // We ensure the values are Smis to avoid confusing the garbage
-  // collector in the event that any values are retrieved and stored
-  // elsewhere.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(
-        i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   ApiFunction function(descriptor->deoptimization_handler());
@@ -131,8 +106,7 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on PPC in the input frame.
   return false;
 }
diff --git a/src/ppc/disasm-ppc.cc b/src/ppc/disasm-ppc.cc
index d9450f8..e72658f 100644
--- a/src/ppc/disasm-ppc.cc
+++ b/src/ppc/disasm-ppc.cc
@@ -1073,14 +1073,12 @@
   out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x       ",
                               instr->InstructionBits());
 
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  // The first field will be identified as a jump table entry.  We emit the rest
-  // of the structure as zero, so just skip past them.
-  if (instr->InstructionBits() == 0) {
+  if (ABI_USES_FUNCTION_DESCRIPTORS && instr->InstructionBits() == 0) {
+    // The first field will be identified as a jump table entry.  We
+    // emit the rest of the structure as zero, so just skip past them.
     Format(instr, "constant");
     return Instruction::kInstrSize;
   }
-#endif
 
   switch (instr->OpcodeValue() << 26) {
     case TWI: {
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index b649f71..3db7bd5 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -54,20 +54,6 @@
 const Register StringCompareDescriptor::RightRegister() { return r3; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return r4; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return r4; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return r5; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
-
-
 const Register ApiGetterDescriptor::function_address() { return r5; }
 
 
@@ -96,6 +82,29 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4, r6};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -113,6 +122,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
 
 
@@ -165,13 +178,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r6, r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r4};
@@ -406,6 +412,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -417,7 +431,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -429,7 +442,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 9cd35ab..14759de 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -183,6 +183,10 @@
   }
 }
 
+void MacroAssembler::Drop(Register count, Register scratch) {
+  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
+  add(sp, sp, scratch);
+}
 
 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
 
@@ -298,13 +302,10 @@
 
 void MacroAssembler::InNewSpace(Register object, Register scratch,
                                 Condition cond, Label* branch) {
-  // N.B. scratch may be same register as object
   DCHECK(cond == eq || cond == ne);
-  mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
-  and_(scratch, object, r0);
-  mov(r0, Operand(ExternalReference::new_space_start(isolate())));
-  cmp(scratch, r0);
-  b(cond, branch);
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cond, branch);
 }
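The old sequence masked the address against new_space_mask and compared with new_space_start, which assumes new space is one contiguous range; the replacement reads the IN_FROM_SPACE/IN_TO_SPACE flags from the object's page header via CheckPageFlag. A hedged model of that test (page size and flag placement are assumptions, not V8's real layout):

#include <cstdint>

constexpr uintptr_t kAssumedPageSize = uintptr_t{1} << 19;  // 512 KB pages
constexpr uint32_t kInFromSpaceBit = 1;  // stand-ins for MemoryChunk's enum
constexpr uint32_t kInToSpaceBit = 2;

// Model: the flag word sits at the start of the page the object lives on.
bool InNewSpace(uintptr_t object_address) {
  uintptr_t chunk = object_address & ~(kAssumedPageSize - 1);
  uint32_t flags = *reinterpret_cast<const uint32_t*>(chunk);
  uint32_t mask = (1u << kInFromSpaceBit) | (1u << kInToSpaceBit);
  return (flags & mask) != 0;  // the eq/ne branch is taken off this test
}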
 
 
@@ -483,6 +484,68 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to update
+  // remembered set. If incremental marking is off, there is nothing for us to
+  // do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(js_function.is(r4));
+  DCHECK(code_entry.is(r7));
+  DCHECK(scratch.is(r8));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    addi(scratch, js_function, Operand(offset - kHeapObjectTag));
+    LoadP(ip, MemOperand(scratch));
+    cmp(ip, code_entry);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  const Register dst = scratch;
+  addi(dst, js_function, Operand(offset - kHeapObjectTag));
+
+  // Save caller-saved registers.  js_function and code_entry are in the
+  // caller-saved register list.
+  DCHECK(kJSCallerSaved & js_function.bit());
+  DCHECK(kJSCallerSaved & code_entry.bit());
+  mflr(r0);
+  MultiPush(kJSCallerSaved | r0.bit());
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count, code_entry);
+
+  mr(r3, js_function);
+  mr(r4, dst);
+  mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers (including js_function and code_entry).
+  MultiPop(kJSCallerSaved | r0.bit());
+  mtlr(r0);
+
+  bind(&done);
+}
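Because this barrier calls straight into C rather than a record-write stub, it must spill the whole caller-saved set plus LR around the call; the callee receives the host function, the slot address, and the isolate in r3-r5. A hedged sketch of the C-side shape the marshalling implies (the real entry point is V8's incremental-marking record-write function; this signature is an illustration, not its declaration):

// Hypothetical signature matching the three arguments set up in r3/r4/r5:
// host JSFunction, address of its code-entry slot, and the isolate.
extern "C" void RecordWriteCodeEntryFromCode(void* host_js_function,
                                             void** code_entry_slot,
                                             void* isolate);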
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address, Register scratch,
@@ -564,6 +627,16 @@
   mtlr(r0);
 }
 
+void MacroAssembler::RestoreFrameStateForTailCall() {
+  if (FLAG_enable_embedded_constant_pool) {
+    LoadP(kConstantPoolRegister,
+          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+    set_constant_pool_available(false);
+  }
+  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  mtlr(r0);
+}
 
 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
 const int MacroAssembler::kNumSafepointSavedRegisters =
@@ -640,28 +713,27 @@
   fsub(dst, src, kDoubleRegZero);
 }
 
-
-void MacroAssembler::ConvertIntToDouble(Register src,
-                                        DoubleRegister double_dst) {
-  MovIntToDouble(double_dst, src, r0);
-  fcfid(double_dst, double_dst);
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+  MovIntToDouble(dst, src, r0);
+  fcfid(dst, dst);
 }
 
-
 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
-                                                DoubleRegister double_dst) {
-  MovUnsignedIntToDouble(double_dst, src, r0);
-  fcfid(double_dst, double_dst);
+                                                DoubleRegister dst) {
+  MovUnsignedIntToDouble(dst, src, r0);
+  fcfid(dst, dst);
 }
 
-
-void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
-                                       const Register src,
-                                       const Register int_scratch) {
-  MovIntToDouble(dst, src, int_scratch);
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+  MovIntToDouble(dst, src, r0);
   fcfids(dst, dst);
 }
 
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+                                               DoubleRegister dst) {
+  MovUnsignedIntToDouble(dst, src, r0);
+  fcfids(dst, dst);
+}
 
 #if V8_TARGET_ARCH_PPC64
 void MacroAssembler::ConvertInt64ToDouble(Register src,
@@ -1116,7 +1188,7 @@
       Push(new_target);
     }
     Push(fun, fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2114,6 +2186,41 @@
   TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
 }
 
+void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
+                                           Register scratch1,
+                                           Register scratch2) {
+#if V8_TARGET_ARCH_PPC64
+  MovDoubleToInt64(scratch1, input);
+  rotldi(scratch1, scratch1, 1);
+  cmpi(scratch1, Operand(1));
+#else
+  MovDoubleToInt64(scratch1, scratch2, input);
+  Label done;
+  cmpi(scratch2, Operand::Zero());
+  bne(&done);
+  lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
+  cmp(scratch1, scratch2);
+  bind(&done);
+#endif
+}
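On PPC64 the check is a bit trick: only -0.0 has the bit pattern 0x8000000000000000, which rotated left by one becomes exactly 1; the 32-bit path compares the two halves separately. The same trick in portable C++:

#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));       // MovDoubleToInt64
  uint64_t rotated = (bits << 1) | (bits >> 63);  // rotldi(scratch, scratch, 1)
  return rotated == 1;                            // cmpi(scratch, Operand(1))
}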
+
+void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+  MovDoubleToInt64(scratch, input);
+#else
+  MovDoubleHighToInt(scratch, input);
+#endif
+  cmpi(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+  LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+#else
+  lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
+#endif
+  cmpi(scratch, Operand::Zero());
+}
 
 void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                            DoubleRegister double_input,
@@ -2335,18 +2442,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, r4);
-  InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2441,9 +2536,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // will not return here
 }
@@ -2656,6 +2751,18 @@
   }
 }
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
+    push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+    pop(object);
+    Check(ge, kOperandIsNotAReceiver);
+  }
+}
 
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                      Register scratch) {
@@ -3087,20 +3194,21 @@
                                          int num_reg_arguments,
                                          int num_double_arguments) {
   DCHECK(has_frame());
-// Just call directly. The function called cannot cause a GC, or
-// allow preemption, so the return address in the link register
-// stays correct.
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
   Register dest = function;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
-  // AIX uses a function descriptor. When calling C code be aware
-  // of this descriptor and pick up values from it
-  LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
-  LoadP(ip, MemOperand(function, 0));
-  dest = ip;
-#elif ABI_CALL_VIA_IP
-  Move(ip, function);
-  dest = ip;
-#endif
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    // AIX/PPC64BE Linux uses a function descriptor. When calling C code be
+    // aware of this descriptor and pick up values from it
+    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+    LoadP(ip, MemOperand(function, 0));
+    dest = ip;
+  } else if (ABI_CALL_VIA_IP) {
+    Move(ip, function);
+    dest = ip;
+  }
 
   Call(dest);
 
@@ -3116,41 +3224,6 @@
 }
 
 
-void MacroAssembler::FlushICache(Register address, size_t size,
-                                 Register scratch) {
-  if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
-    sync();
-    icbi(r0, address);
-    isync();
-    return;
-  }
-
-  Label done;
-
-  dcbf(r0, address);
-  sync();
-  icbi(r0, address);
-  isync();
-
-  // This code handles ranges which cross a single cacheline boundary.
-  // scratch is last cacheline which intersects range.
-  const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
-
-  DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
-  addi(scratch, address, Operand(size - 1));
-  ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
-  cmpl(scratch, address);
-  ble(&done);
-
-  dcbf(r0, scratch);
-  sync();
-  icbi(r0, scratch);
-  isync();
-
-  bind(&done);
-}
-
-
 void MacroAssembler::DecodeConstantPoolOffset(Register result,
                                               Register location) {
   Label overflow_access, done;
@@ -3386,7 +3459,8 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+  Register null_value = r8;
   Register empty_fixed_array_value = r9;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
   Label next, start;
@@ -3400,6 +3474,7 @@
   CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
   beq(call_runtime);
 
+  LoadRoot(null_value, Heap::kNullValueRootIndex);
   b(&start);
 
   bind(&next);
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index 78de89a..d9dbd56 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -16,6 +16,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_r3};
 const Register kReturnRegister1 = {Register::kCode_r4};
+const Register kReturnRegister2 = {Register::kCode_r5};
 const Register kJSFunctionRegister = {Register::kCode_r4};
 const Register kContextRegister = {Register::kCode_r30};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
@@ -146,6 +147,7 @@
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count);
+  void Drop(Register count, Register scratch = r0);
 
   void Ret(int drop) {
     Drop(drop);
@@ -161,6 +163,7 @@
   }
 
   // Register move. May do nothing if the registers are identical.
+  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
   void Move(DoubleRegister dst, DoubleRegister src);
@@ -200,13 +203,13 @@
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
   void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
+    InNewSpace(object, scratch, eq, branch);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
   // The register scratch can be object itself, but it will be clobbered.
   void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
+    InNewSpace(object, scratch, ne, branch);
   }
 
   // Check if an object has a given incremental marking color.
@@ -248,6 +251,11 @@
                      pointers_to_here_check_for_value);
   }
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(Register object, Register map, Register dst,
                          LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
 
@@ -341,6 +349,10 @@
   void PushFixedFrame(Register marker_reg = no_reg);
   void PopFixedFrame(Register marker_reg = no_reg);
 
+  // Restore caller's frame pointer and return address prior to being
+  // overwritten by tail call stack preparation.
+  void RestoreFrameStateForTailCall();
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -364,18 +376,20 @@
   }
 
   // Converts the integer (untagged smi) in |src| to a double, storing
-  // the result to |double_dst|
-  void ConvertIntToDouble(Register src, DoubleRegister double_dst);
+  // the result to |dst|
+  void ConvertIntToDouble(Register src, DoubleRegister dst);
 
   // Converts the unsigned integer (untagged smi) in |src| to
-  // a double, storing the result to |double_dst|
-  void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
+  // a double, storing the result to |dst|
+  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
 
   // Converts the integer (untagged smi) in |src| to
   // a float, storing the result in |dst|
-  // Warning: The value in |int_scratch| will be changed in the process!
-  void ConvertIntToFloat(const DoubleRegister dst, const Register src,
-                         const Register int_scratch);
+  void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+  // Converts the unsigned integer (untagged smi) in |src| to
+  // a float, storing the result in |dst|
+  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
 
 #if V8_TARGET_ARCH_PPC64
   void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
@@ -858,6 +872,16 @@
   void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
                          Register scratch2, DoubleRegister double_scratch);
 
+  // Check if a double is equal to -0.0.
+  // CR_EQ in cr7 holds the result.
+  void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
+                             Register scratch2);
+
+  // Check the sign of a double.
+  // CR_LT in cr7 holds the result.
+  void TestDoubleSign(DoubleRegister input, Register scratch);
+  void TestHeapNumberSign(Register input, Register scratch);
+
   // Try to convert a double to a signed 32-bit integer.
   // CR_EQ in cr7 is set and result assigned if the conversion is exact.
   void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1004,10 +1028,6 @@
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   Handle<Object> CodeObject() {
     DCHECK(!code_object_.is_null());
     return code_object_;
@@ -1332,6 +1352,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1446,9 +1469,9 @@
   // Returns the pc offset at which the frame ends.
   int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
 
-  // Expects object in r0 and returns map with validated enum cache
-  // in r0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
+  // Expects object in r3 and returns map with validated enum cache
+  // in r3.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
   // AllocationMemento object that can be checked for in order to pretransition
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 0efa660..9a1f9e0 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -15,6 +15,7 @@
 #include "src/ppc/constants-ppc.h"
 #include "src/ppc/frames-ppc.h"
 #include "src/ppc/simulator-ppc.h"
+#include "src/runtime/runtime-utils.h"
 
 #if defined(USE_SIMULATOR)
 
@@ -446,7 +447,8 @@
           HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
           intptr_t value = *cur;
           Heap* current_heap = sim_->isolate_->heap();
-          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+          if (((value & 1) == 0) ||
+              current_heap->ContainsSlow(obj->address())) {
             PrintF(" (");
             if ((value & 1) == 0) {
               PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
@@ -855,10 +857,19 @@
         isolate->simulator_i_cache(),
         reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
     isolate->set_simulator_redirection(this);
+    if (ABI_USES_FUNCTION_DESCRIPTORS) {
+      function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
+      function_descriptor_[1] = 0;
+      function_descriptor_[2] = 0;
+    }
   }
 
-  void* address_of_swi_instruction() {
-    return reinterpret_cast<void*>(&swi_instruction_);
+  void* address() {
+    if (ABI_USES_FUNCTION_DESCRIPTORS) {
+      return reinterpret_cast<void*>(function_descriptor_);
+    } else {
+      return reinterpret_cast<void*>(&swi_instruction_);
+    }
   }
 
   void* external_function() { return external_function_; }
@@ -883,9 +894,16 @@
     return reinterpret_cast<Redirection*>(addr_of_redirection);
   }
 
+  static Redirection* FromAddress(void* address) {
+    int delta = ABI_USES_FUNCTION_DESCRIPTORS
+                    ? offsetof(Redirection, function_descriptor_)
+                    : offsetof(Redirection, swi_instruction_);
+    char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
   static void* ReverseRedirection(intptr_t reg) {
-    Redirection* redirection = FromSwiInstruction(
-        reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+    Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
     return redirection->external_function();
   }
 
@@ -902,6 +920,7 @@
   uint32_t swi_instruction_;
   ExternalReference::Type type_;
   Redirection* next_;
+  intptr_t function_descriptor_[3];
 };
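On descriptor-based ABIs (AIX, big-endian PPC64 Linux ELFv1) a function pointer addresses a three-word descriptor rather than code, so the redirection embeds one whose entry word points at the trap instruction. Roughly the shape being filled in above:

#include <cstdint>

// ELFv1-style function descriptor as modeled by function_descriptor_[3]:
// only the entry word matters to the simulator, so the TOC and environment
// slots stay zero (exactly what the constructor writes).
struct FunctionDescriptor {
  intptr_t entry;        // -> &swi_instruction_
  intptr_t toc;          // callee's r2 value; unused here
  intptr_t environment;  // static chain; unused here
};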
 
 
@@ -922,7 +941,7 @@
                                            void* external_function,
                                            ExternalReference::Type type) {
   Redirection* redirection = Redirection::Get(isolate, external_function, type);
-  return redirection->address_of_swi_instruction();
+  return redirection->address();
 }
 
 
@@ -1171,20 +1190,11 @@
 
 
 #if V8_TARGET_ARCH_PPC64
-struct ObjectPair {
-  intptr_t x;
-  intptr_t y;
-};
-
-
 static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
-  *x = pair->x;
-  *y = pair->y;
+  *x = reinterpret_cast<intptr_t>(pair->x);
+  *y = reinterpret_cast<intptr_t>(pair->y);
 }
 #else
-typedef uint64_t ObjectPair;
-
-
 static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
 #if V8_TARGET_BIG_ENDIAN
   *x = static_cast<int32_t>(*pair >> 32);
@@ -1196,16 +1206,17 @@
 }
 #endif
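On 32-bit targets an ObjectPair is one uint64_t with the two tagged values packed endian-dependently, and decodeObjectPair just unpacks it. A self-contained round trip for the big-endian layout shown above:

#include <cstdint>

// Big-endian packing: first value in the high word, second in the low word.
uint64_t MakePairBE(uint32_t x, uint32_t y) {
  return (static_cast<uint64_t>(x) << 32) | y;
}

void DecodePairBE(uint64_t pair, intptr_t* x, intptr_t* y) {
  *x = static_cast<int32_t>(pair >> 32);
  *y = static_cast<int32_t>(pair & 0xffffffffu);
}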
 
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in
-// runtime.cc uses the ObjectPair which is essentially two pointer
-// values stuffed into a structure. With the code below we assume that
-// all runtime calls return this pair. If they don't, the r4 result
-// register contains a bogus value, which is fine because it is
-// caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
-                                           intptr_t arg2, intptr_t arg3,
-                                           intptr_t arg4, intptr_t arg5);
+// Calls into the V8 runtime.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+                                         intptr_t arg2, intptr_t arg3,
+                                         intptr_t arg4, intptr_t arg5);
+typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
+                                               intptr_t arg2, intptr_t arg3,
+                                               intptr_t arg4, intptr_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
+                                                   intptr_t arg2, intptr_t arg3,
+                                                   intptr_t arg4,
+                                                   intptr_t arg5);
 
 // These prototypes handle the four types of FP calls.
 typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -1237,13 +1248,15 @@
       Redirection* redirection = Redirection::FromSwiInstruction(instr);
       const int kArgCount = 6;
       int arg0_regnum = 3;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
       intptr_t result_buffer = 0;
-      if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
+      bool uses_result_buffer =
+          redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
+          (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+           !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+      if (uses_result_buffer) {
         result_buffer = get_register(r3);
         arg0_regnum++;
       }
-#endif
       intptr_t arg[kArgCount];
       for (int i = 0; i < kArgCount; i++) {
         arg[i] = get_register(arg0_regnum + i);
@@ -1389,9 +1402,9 @@
         CHECK(stack_aligned);
         SimulatorRuntimeDirectGetterCall target =
             reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
-        arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+        if (!ABI_PASSES_HANDLES_IN_REGS) {
+          arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+        }
         target(arg[0], arg[1]);
       } else if (redirection->type() ==
                  ExternalReference::PROFILING_GETTER_CALL) {
@@ -1408,9 +1421,9 @@
         CHECK(stack_aligned);
         SimulatorRuntimeProfilingGetterCall target =
             reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
-        arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+        if (!ABI_PASSES_HANDLES_IN_REGS) {
+          arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+        }
         target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
       } else {
         // builtin call.
@@ -1430,19 +1443,53 @@
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
-        SimulatorRuntimeCall target =
-            reinterpret_cast<SimulatorRuntimeCall>(external);
-        ObjectPair result =
-            target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
-        intptr_t x;
-        intptr_t y;
-        decodeObjectPair(&result, &x, &y);
-        if (::v8::internal::FLAG_trace_sim) {
-          PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+        if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+          SimulatorRuntimeTripleCall target =
+              reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+          ObjectTriple result =
+              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+                   "}\n",
+                   reinterpret_cast<intptr_t>(result.x),
+                   reinterpret_cast<intptr_t>(result.y),
+                   reinterpret_cast<intptr_t>(result.z));
+          }
+          memcpy(reinterpret_cast<void*>(result_buffer), &result,
+                 sizeof(ObjectTriple));
+          set_register(r3, result_buffer);
+        } else {
+          if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+            SimulatorRuntimePairCall target =
+                reinterpret_cast<SimulatorRuntimePairCall>(external);
+            ObjectPair result =
+                target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+            intptr_t x;
+            intptr_t y;
+            decodeObjectPair(&result, &x, &y);
+            if (::v8::internal::FLAG_trace_sim) {
+              PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+            }
+            if (ABI_RETURNS_OBJECT_PAIRS_IN_REGS) {
+              set_register(r3, x);
+              set_register(r4, y);
+            } else {
+              memcpy(reinterpret_cast<void*>(result_buffer), &result,
+                     sizeof(ObjectPair));
+              set_register(r3, result_buffer);
+            }
+          } else {
+            DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+            SimulatorRuntimeCall target =
+                reinterpret_cast<SimulatorRuntimeCall>(external);
+            intptr_t result =
+                target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+            if (::v8::internal::FLAG_trace_sim) {
+              PrintF("Returned %08" V8PRIxPTR "\n", result);
+            }
+            set_register(r3, result);
+          }
         }
-        set_register(r3, x);
-        set_register(r4, y);
       }
       set_pc(saved_lr);
       break;
@@ -3852,17 +3899,19 @@
   // Adjust JS-based stack limit to C-based stack limit.
   isolate_->stack_guard()->AdjustStackLimitForSimulator();
 
-// Prepare to execute the code at entry
-#if ABI_USES_FUNCTION_DESCRIPTORS
-  // entry is the function descriptor
-  set_pc(*(reinterpret_cast<intptr_t*>(entry)));
-#else
-  // entry is the instruction address
-  set_pc(reinterpret_cast<intptr_t>(entry));
-#endif
+  // Prepare to execute the code at entry
+  if (ABI_USES_FUNCTION_DESCRIPTORS) {
+    // entry is the function descriptor
+    set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+  } else {
+    // entry is the instruction address
+    set_pc(reinterpret_cast<intptr_t>(entry));
+  }
 
-  // Put target address in ip (for JS prologue).
-  set_register(r12, get_pc());
+  if (ABI_CALL_VIA_IP) {
+    // Put target address in ip (for JS prologue).
+    set_register(r12, get_pc());
+  }
 
   // Put down marker for end of simulation. The simulator will stop simulation
   // when the PC reaches this value. By saving the "end simulation" value into
@@ -3919,8 +3968,12 @@
   Execute();
 
   // Check that the non-volatile registers have been preserved.
-  CHECK_EQ(callee_saved_value, get_register(r2));
-  CHECK_EQ(callee_saved_value, get_register(r13));
+  if (ABI_TOC_REGISTER != 2) {
+    CHECK_EQ(callee_saved_value, get_register(r2));
+  }
+  if (ABI_TOC_REGISTER != 13) {
+    CHECK_EQ(callee_saved_value, get_register(r13));
+  }
   CHECK_EQ(callee_saved_value, get_register(r14));
   CHECK_EQ(callee_saved_value, get_register(r15));
   CHECK_EQ(callee_saved_value, get_register(r16));
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index bbddc87..b6c7945 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -49,12 +49,12 @@
   regs.sp = fp - fp_to_sp_delta;
   regs.fp = fp;
   regs.pc = from;
-  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
   ticks_from_vm_buffer_.Enqueue(record);
 }
 
-
-void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
+void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
+                                              bool update_stats) {
   TickSampleEventRecord record(last_code_event_id_.Value());
   RegisterState regs;
   StackFrameIterator it(isolate);
@@ -64,7 +64,7 @@
     regs.fp = frame->fp();
     regs.pc = frame->pc();
   }
-  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+  record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
   ticks_from_vm_buffer_.Enqueue(record);
 }
 
@@ -429,6 +429,11 @@
   profiles_ = new CpuProfilesCollection(isolate()->heap());
 }
 
+void CpuProfiler::CollectSample() {
+  if (processor_ != NULL) {
+    processor_->AddCurrentStack(isolate_);
+  }
+}
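CollectSample lets an embedder force a tick outside the sampler's timer, which is handy around interesting program points. Assuming the matching public entry point in v8-profiler.h from this version, usage looks like:

#include <v8-profiler.h>

// Hedged usage sketch: record one extra sample while a profile is running.
void ForceProfilerSample(v8::Isolate* isolate) {
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  profiler->CollectSample();  // enqueues the current stack immediately
}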
 
 void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
   if (profiles_->StartProfiling(title, record_samples)) {
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index e5ef0ac..1a1249c 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -139,7 +139,7 @@
   void Enqueue(const CodeEventsContainer& event);
 
   // Puts current stack into tick sample events buffer.
-  void AddCurrentStack(Isolate* isolate);
+  void AddCurrentStack(Isolate* isolate, bool update_stats = false);
   void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
 
   // Tick sample events are filled directly in the buffer of the circular
@@ -168,8 +168,7 @@
   ProfileGenerator* generator_;
   Sampler* sampler_;
   base::Atomic32 running_;
-  // Sampling period in microseconds.
-  const base::TimeDelta period_;
+  const base::TimeDelta period_;  // Samples & code events processing period.
   LockedQueue<CodeEventsContainer> events_buffer_;
   static const size_t kTickSampleBufferSize = 1 * MB;
   static const size_t kTickSampleQueueLength =
@@ -205,6 +204,7 @@
   virtual ~CpuProfiler();
 
   void set_sampling_interval(base::TimeDelta value);
+  void CollectSample();
   void StartProfiling(const char* title, bool record_samples = false);
   void StartProfiling(String* title, bool record_samples);
   CpuProfile* StopProfiling(const char* title);
diff --git a/src/profiler/heap-profiler.cc b/src/profiler/heap-profiler.cc
index 4403e5d..1305cae 100644
--- a/src/profiler/heap-profiler.cc
+++ b/src/profiler/heap-profiler.cc
@@ -8,6 +8,7 @@
 #include "src/debug/debug.h"
 #include "src/profiler/allocation-tracker.h"
 #include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/profiler/sampling-heap-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -84,6 +85,31 @@
 }
 
 
+bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
+                                             int stack_depth) {
+  if (sampling_heap_profiler_.get()) {
+    return false;
+  }
+  sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
+      heap(), names_.get(), sample_interval, stack_depth));
+  return true;
+}
+
+
+void HeapProfiler::StopSamplingHeapProfiler() {
+  sampling_heap_profiler_.Reset(nullptr);
+}
+
+
+v8::AllocationProfile* HeapProfiler::GetAllocationProfile() {
+  if (sampling_heap_profiler_.get()) {
+    return sampling_heap_profiler_->GetAllocationProfile();
+  } else {
+    return nullptr;
+  }
+}
+
+
 void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
   ids_->UpdateHeapObjectsMap();
   is_tracking_object_moves_ = true;
diff --git a/src/profiler/heap-profiler.h b/src/profiler/heap-profiler.h
index 9a04e83..32e143c 100644
--- a/src/profiler/heap-profiler.h
+++ b/src/profiler/heap-profiler.h
@@ -16,6 +16,7 @@
 class AllocationTracker;
 class HeapObjectsMap;
 class HeapSnapshot;
+class SamplingHeapProfiler;
 class StringsStorage;
 
 class HeapProfiler {
@@ -29,6 +30,11 @@
       v8::ActivityControl* control,
       v8::HeapProfiler::ObjectNameResolver* resolver);
 
+  bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+  void StopSamplingHeapProfiler();
+  bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
+  AllocationProfile* GetAllocationProfile();
+
   void StartHeapObjectsTracking(bool track_allocations);
   void StopHeapObjectsTracking();
   AllocationTracker* allocation_tracker() const {
@@ -79,6 +85,7 @@
   base::SmartPointer<AllocationTracker> allocation_tracker_;
   bool is_tracking_object_moves_;
   base::Mutex profiler_mutex_;
+  base::SmartPointer<SamplingHeapProfiler> sampling_heap_profiler_;
 };
 
 }  // namespace internal
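
For orientation, a minimal usage sketch of the new internal entry points (names as declared above; the profile returned by GetAllocationProfile() is heap-allocated and owned by the caller, per the implementation in sampling-heap-profiler.cc below; RunWorkload() is hypothetical):

  HeapProfiler* profiler = isolate->heap_profiler();
  // Sample on average every 512 KiB of allocation; keep up to 16 frames.
  if (profiler->StartSamplingHeapProfiler(512 * 1024, 16)) {
    RunWorkload();  // hypothetical allocation-heavy code
    v8::AllocationProfile* profile = profiler->GetAllocationProfile();
    // ... walk profile->GetRootNode() ...
    delete profile;
    profiler->StopSamplingHeapProfiler();
  }
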
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 69ed5e6..fc43f9f 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -11,7 +11,6 @@
 #include "src/profiler/allocation-tracker.h"
 #include "src/profiler/heap-profiler.h"
 #include "src/profiler/heap-snapshot-generator-inl.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -1109,10 +1108,6 @@
     TagObject(js_fun->bound_arguments(), "(bound arguments)");
     SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
                          JSBoundFunction::kBoundArgumentsOffset);
-    TagObject(js_fun->creation_context(), "(creation context)");
-    SetInternalReference(js_fun, entry, "creation_context",
-                         js_fun->creation_context(),
-                         JSBoundFunction::kCreationContextOffset);
     SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
     SetNativeBindReference(js_obj, entry, "bound_function",
                            js_fun->bound_target_function());
@@ -1425,18 +1420,17 @@
   SetInternalReference(accessor_info, entry, "expected_receiver_type",
                        accessor_info->expected_receiver_type(),
                        AccessorInfo::kExpectedReceiverTypeOffset);
-  if (accessor_info->IsExecutableAccessorInfo()) {
-    ExecutableAccessorInfo* executable_accessor_info =
-        ExecutableAccessorInfo::cast(accessor_info);
+  if (accessor_info->IsAccessorInfo()) {
+    AccessorInfo* executable_accessor_info = AccessorInfo::cast(accessor_info);
     SetInternalReference(executable_accessor_info, entry, "getter",
                          executable_accessor_info->getter(),
-                         ExecutableAccessorInfo::kGetterOffset);
+                         AccessorInfo::kGetterOffset);
     SetInternalReference(executable_accessor_info, entry, "setter",
                          executable_accessor_info->setter(),
-                         ExecutableAccessorInfo::kSetterOffset);
+                         AccessorInfo::kSetterOffset);
     SetInternalReference(executable_accessor_info, entry, "data",
                          executable_accessor_info->data(),
-                         ExecutableAccessorInfo::kDataOffset);
+                         AccessorInfo::kDataOffset);
   }
 }
 
@@ -1538,7 +1532,7 @@
   // Do not visit weak_next as it is not visited by the StaticVisitor,
   // and we're not very interested in the weak_next field here.
   STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
-                AllocationSite::BodyDescriptor::kEndOffset);
+                AllocationSite::kPointerFieldsEndOffset);
 }
 
 
@@ -1604,7 +1598,7 @@
           int field_offset =
               field_index.is_inobject() ? field_index.offset() : -1;
 
-          if (k != heap_->hidden_string()) {
+          if (k != heap_->hidden_properties_symbol()) {
             SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
                                                value, NULL, field_offset);
           } else {
@@ -1631,7 +1625,7 @@
         DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
         PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
         Object* value = cell->value();
-        if (k == heap_->hidden_string()) {
+        if (k == heap_->hidden_properties_symbol()) {
           TagObject(value, "(hidden properties)");
           SetInternalReference(js_obj, entry, "hidden_properties", value);
           continue;
@@ -1648,7 +1642,7 @@
       Object* k = dictionary->KeyAt(i);
       if (dictionary->IsKey(k)) {
         Object* value = dictionary->ValueAt(i);
-        if (k == heap_->hidden_string()) {
+        if (k == heap_->hidden_properties_symbol()) {
           TagObject(value, "(hidden properties)");
           SetInternalReference(js_obj, entry, "hidden_properties", value);
           continue;
@@ -1873,7 +1867,6 @@
 bool V8HeapExplorer::IsEssentialObject(Object* object) {
   return object->IsHeapObject() && !object->IsOddball() &&
          object != heap_->empty_byte_array() &&
-         object != heap_->empty_bytecode_array() &&
          object != heap_->empty_fixed_array() &&
          object != heap_->empty_descriptor_array() &&
          object != heap_->fixed_array_map() && object != heap_->cell_map() &&
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index 890f341..58d06c9 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -274,9 +274,8 @@
   return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
 }
 
-
 ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
-                                         int src_line) {
+                                         int src_line, bool update_stats) {
   ProfileNode* node = root_;
   CodeEntry* last_entry = NULL;
   for (CodeEntry** entry = path.start() + path.length() - 1;
@@ -290,9 +289,11 @@
   if (last_entry && last_entry->has_deopt_info()) {
     node->CollectDeoptInfo(last_entry);
   }
-  node->IncrementSelfTicks();
-  if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
-    node->IncrementLineTicks(src_line);
+  if (update_stats) {
+    node->IncrementSelfTicks();
+    if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
+      node->IncrementLineTicks(src_line);
+    }
   }
   return node;
 }
@@ -354,11 +355,12 @@
       start_time_(base::TimeTicks::HighResolutionNow()),
       top_down_(isolate) {}
 
-
 void CpuProfile::AddPath(base::TimeTicks timestamp,
-                         const Vector<CodeEntry*>& path, int src_line) {
-  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path, src_line);
-  if (record_samples_) {
+                         const Vector<CodeEntry*>& path, int src_line,
+                         bool update_stats) {
+  ProfileNode* top_frame_node =
+      top_down_.AddPathFromEnd(path, src_line, update_stats);
+  if (record_samples_ && !timestamp.IsNull()) {
     timestamps_.Add(timestamp);
     samples_.Add(top_frame_node);
   }
@@ -522,15 +524,15 @@
   UNREACHABLE();
 }
 
-
 void CpuProfilesCollection::AddPathToCurrentProfiles(
-    base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line) {
+    base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line,
+    bool update_stats) {
   // As starting / stopping profiles is rare relative to this
   // method, we don't bother minimizing the duration of lock holding,
   // e.g. copying contents of the list to a local vector.
   current_profiles_semaphore_.Wait();
   for (int i = 0; i < current_profiles_.length(); ++i) {
-    current_profiles_[i]->AddPath(timestamp, path, src_line);
+    current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
   }
   current_profiles_semaphore_.Signal();
 }
@@ -595,7 +597,7 @@
       // Don't use PC when in external callback code, as it can point
       // inside callback's code, and we will erroneously report
       // that a callback calls itself.
-      *entry++ = code_map_.FindEntry(sample.external_callback);
+      *entry++ = code_map_.FindEntry(sample.external_callback_entry);
     } else {
       CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
       // If there is no pc_entry we're likely in native code.
@@ -634,10 +636,9 @@
       }
     }
 
-    for (const Address* stack_pos = sample.stack,
-           *stack_end = stack_pos + sample.frames_count;
-         stack_pos != stack_end;
-         ++stack_pos) {
+    for (const Address *stack_pos = sample.stack,
+                       *stack_end = stack_pos + sample.frames_count;
+         stack_pos != stack_end; ++stack_pos) {
       *entry = code_map_.FindEntry(*stack_pos);
 
       // Skip unresolved frames (e.g. internal frame) and get source line of
@@ -670,7 +671,8 @@
     }
   }
 
-  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line);
+  profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
+                                      sample.update_stats);
 }
 
 
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 47a73f1..3c976d6 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -192,7 +192,8 @@
 
   ProfileNode* AddPathFromEnd(
       const Vector<CodeEntry*>& path,
-      int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
+      int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
+      bool update_stats = true);
   ProfileNode* root() const { return root_; }
   unsigned next_node_id() { return next_node_id_++; }
   unsigned GetFunctionId(const ProfileNode* node);
@@ -225,7 +226,7 @@
 
   // Add pc -> ... -> main() call path to the profile.
   void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
-               int src_line);
+               int src_line, bool update_stats);
   void CalculateTotalTicksAndSamplingRate();
 
   const char* title() const { return title_; }
@@ -333,7 +334,8 @@
 
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(base::TimeTicks timestamp,
-                                const Vector<CodeEntry*>& path, int src_line);
+                                const Vector<CodeEntry*>& path, int src_line,
+                                bool update_stats);
 
   // Limits the number of profiles that can be simultaneously collected.
   static const int kMaxSimultaneousProfiles = 100;
diff --git a/src/profiler/sampler.cc b/src/profiler/sampler.cc
index dc4c4c4..e331db9 100644
--- a/src/profiler/sampler.cc
+++ b/src/profiler/sampler.cc
@@ -657,10 +657,12 @@
 //
 DISABLE_ASAN void TickSample::Init(Isolate* isolate,
                                    const v8::RegisterState& regs,
-                                   RecordCEntryFrame record_c_entry_frame) {
+                                   RecordCEntryFrame record_c_entry_frame,
+                                   bool update_stats) {
   timestamp = base::TimeTicks::HighResolutionNow();
   pc = reinterpret_cast<Address>(regs.pc);
   state = isolate->current_vm_state();
+  this->update_stats = update_stats;
 
   // Avoid collecting traces while doing GC.
   if (state == GC) return;
@@ -669,6 +671,8 @@
   if (js_entry_sp == 0) return;  // Not executing JS now.
 
   if (pc && IsNoFrameRegion(pc)) {
+    // Can't collect stack. Mark the sample as spoiled.
+    timestamp = base::TimeTicks();
     pc = 0;
     return;
   }
@@ -679,7 +683,7 @@
   // we have already entered JavaScript again and the external callback
   // is not the top function.
   if (scope && scope->scope_address() < handler) {
-    external_callback = scope->callback();
+    external_callback_entry = *scope->callback_entrypoint_address();
     has_external_callback = true;
   } else {
     // sp register may point at an arbitrary place in memory, make
@@ -699,6 +703,12 @@
   GetStackSample(isolate, regs, record_c_entry_frame,
                  reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
   frames_count = static_cast<unsigned>(info.frames_count);
+  if (!frames_count) {
+    // The VM is executing JS, but we failed to collect a stack trace.
+    // Mark the sample as spoiled.
+    timestamp = base::TimeTicks();
+    pc = 0;
+  }
 }
 
 
@@ -743,7 +753,6 @@
 #endif
 }
 
-
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
@@ -751,17 +760,16 @@
       has_processing_thread_(false),
       active_(false),
       is_counting_samples_(false),
-      js_and_external_sample_count_(0) {
+      js_sample_count_(0),
+      external_sample_count_(0) {
   data_ = new PlatformData;
 }
 
-
 Sampler::~Sampler() {
   DCHECK(!IsActive());
   delete data_;
 }
 
-
 void Sampler::Start() {
   DCHECK(!IsActive());
   SetActive(true);
@@ -796,11 +804,10 @@
   TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
   TickSample sample_obj;
   if (sample == NULL) sample = &sample_obj;
-  sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame);
-  if (is_counting_samples_) {
-    if (sample->state == JS || sample->state == EXTERNAL) {
-      ++js_and_external_sample_count_;
-    }
+  sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
+  if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+    if (sample->state == JS) ++js_sample_count_;
+    if (sample->state == EXTERNAL) ++external_sample_count_;
   }
   Tick(sample);
   if (sample != &sample_obj) {
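
The sampler changes above and the sampler.h changes below define a simple contract for "spoiled" samples. Whenever TickSample::Init() cannot take a usable stack (a no-frame region, or zero frames collected while in JS), it resets timestamp to the default base::TimeTicks(); consumers test for that before using the sample, so CpuProfile::AddPath() skips recording it and DoSample() skips counting it. Sketch of the consumer-side check:

  // Sketch: a null timestamp means "discard this sample".
  TickSample sample;
  sample.Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
  if (sample.timestamp.IsNull()) return;  // spoiled; neither record nor count
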
diff --git a/src/profiler/sampler.h b/src/profiler/sampler.h
index 354e935..8e8ef1c 100644
--- a/src/profiler/sampler.h
+++ b/src/profiler/sampler.h
@@ -34,12 +34,13 @@
   TickSample()
       : state(OTHER),
         pc(NULL),
-        external_callback(NULL),
+        external_callback_entry(NULL),
         frames_count(0),
         has_external_callback(false),
+        update_stats(true),
         top_frame_type(StackFrame::NONE) {}
   void Init(Isolate* isolate, const v8::RegisterState& state,
-            RecordCEntryFrame record_c_entry_frame);
+            RecordCEntryFrame record_c_entry_frame, bool update_stats);
   static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
                              RecordCEntryFrame record_c_entry_frame,
                              void** frames, size_t frames_limit,
@@ -48,7 +49,7 @@
   Address pc;      // Instruction pointer.
   union {
     Address tos;   // Top stack value (*sp).
-    Address external_callback;
+    Address external_callback_entry;
   };
   static const unsigned kMaxFramesCountLog2 = 8;
   static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
@@ -56,6 +57,7 @@
   base::TimeTicks timestamp;
   unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
   bool has_external_callback : 1;
+  bool update_stats : 1;  // Whether the sample should update aggregated stats.
   StackFrame::Type top_frame_type : 4;
 };
 
@@ -98,12 +100,12 @@
   }
 
   // Used in tests to make sure that stack sampling is performed.
-  unsigned js_and_external_sample_count() const {
-    return js_and_external_sample_count_;
-  }
+  unsigned js_sample_count() const { return js_sample_count_; }
+  unsigned external_sample_count() const { return external_sample_count_; }
   void StartCountingSamples() {
-      is_counting_samples_ = true;
-      js_and_external_sample_count_ = 0;
+    js_sample_count_ = 0;
+    external_sample_count_ = 0;
+    is_counting_samples_ = true;
   }
 
   class PlatformData;
@@ -123,9 +125,10 @@
   base::Atomic32 has_processing_thread_;
   base::Atomic32 active_;
   PlatformData* data_;  // Platform specific data.
+  // Counts stack samples taken in various VM states.
   bool is_counting_samples_;
-  // Counts stack samples taken in JS VM state.
-  unsigned js_and_external_sample_count_;
+  unsigned js_sample_count_;
+  unsigned external_sample_count_;
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
 };
 
diff --git a/src/profiler/sampling-heap-profiler.cc b/src/profiler/sampling-heap-profiler.cc
new file mode 100644
index 0000000..c13538c
--- /dev/null
+++ b/src/profiler/sampling-heap-profiler.cc
@@ -0,0 +1,260 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/sampling-heap-profiler.h"
+
+#include <stdint.h>
+#include <memory>
+#include "src/api.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/frames-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+namespace internal {
+
+// We sample with a Poisson process, with constant average sampling interval.
+// This follows the exponential probability distribution with parameter
+// λ = 1/rate where rate is the average number of bytes between samples.
+//
+// Let u be a uniformly distributed random number between 0 and 1, then
+// next_sample = (- ln u) / λ
+intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
+  if (FLAG_sampling_heap_profiler_suppress_randomness) {
+    return static_cast<intptr_t>(rate);
+  }
+  double u = random_->NextDouble();
+  double next = (-std::log(u)) * rate;
+  return next < kPointerSize
+             ? kPointerSize
+             : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
+}
+
+// Samples were collected according to a Poisson process. Since we have not
+// recorded all allocations, we must approximate the shape of the underlying
+// space of allocations based on the samples we have collected. Given that
+// we sample at rate R, the probability that an allocation of size S will be
+// sampled is 1-exp(-S/R). This function uses the above probability to
+// approximate the true number of allocations with size *size* given that
+// *count* samples were observed.
+v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
+    size_t size, unsigned int count) {
+  double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate_));
+  // Round count instead of truncating.
+  return {size, static_cast<unsigned int>(count * scale + 0.5)};
+}
+
+SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
+                                           uint64_t rate, int stack_depth)
+    : isolate_(heap->isolate()),
+      heap_(heap),
+      new_space_observer_(new SamplingAllocationObserver(
+          heap_, static_cast<intptr_t>(rate), rate, this,
+          heap->isolate()->random_number_generator())),
+      other_spaces_observer_(new SamplingAllocationObserver(
+          heap_, static_cast<intptr_t>(rate), rate, this,
+          heap->isolate()->random_number_generator())),
+      names_(names),
+      profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+      samples_(),
+      stack_depth_(stack_depth),
+      rate_(rate) {
+  CHECK_GT(rate_, 0);
+  heap->new_space()->AddAllocationObserver(new_space_observer_.get());
+  AllSpaces spaces(heap);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    if (space != heap->new_space()) {
+      space->AddAllocationObserver(other_spaces_observer_.get());
+    }
+  }
+}
+
+
+SamplingHeapProfiler::~SamplingHeapProfiler() {
+  heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    if (space != heap_->new_space()) {
+      space->RemoveAllocationObserver(other_spaces_observer_.get());
+    }
+  }
+
+  for (auto sample : samples_) {
+    delete sample;
+  }
+  std::set<Sample*> empty;
+  samples_.swap(empty);
+}
+
+
+void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
+  DisallowHeapAllocation no_allocation;
+
+  HandleScope scope(isolate_);
+  HeapObject* heap_object = HeapObject::FromAddress(soon_object);
+  Handle<Object> obj(heap_object, isolate_);
+
+  // Mark the new block as FreeSpace to make sure the heap is iterable while we
+  // are taking the sample.
+  heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size));
+
+  Local<v8::Value> loc = v8::Utils::ToLocal(obj);
+
+  AllocationNode* node = AddStack();
+  node->allocations_[size]++;
+  Sample* sample = new Sample(size, node, loc, this);
+  samples_.insert(sample);
+  sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+}
+
+void SamplingHeapProfiler::OnWeakCallback(
+    const WeakCallbackInfo<Sample>& data) {
+  Sample* sample = data.GetParameter();
+  AllocationNode* node = sample->owner;
+  DCHECK(node->allocations_[sample->size] > 0);
+  node->allocations_[sample->size]--;
+  sample->profiler->samples_.erase(sample);
+  delete sample;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
+    AllocationNode* parent, const char* name, int script_id,
+    int start_position) {
+  for (AllocationNode* child : parent->children_) {
+    if (child->script_id_ == script_id &&
+        child->script_position_ == start_position &&
+        strcmp(child->name_, name) == 0) {
+      return child;
+    }
+  }
+  AllocationNode* child = new AllocationNode(name, script_id, start_position);
+  parent->children_.push_back(child);
+  return child;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
+  AllocationNode* node = &profile_root_;
+
+  std::vector<SharedFunctionInfo*> stack;
+  StackTraceFrameIterator it(isolate_);
+  int frames_captured = 0;
+  while (!it.done() && frames_captured < stack_depth_) {
+    JavaScriptFrame* frame = it.frame();
+    SharedFunctionInfo* shared = frame->function()->shared();
+    stack.push_back(shared);
+
+    frames_captured++;
+    it.Advance();
+  }
+
+  if (frames_captured == 0) {
+    const char* name = nullptr;
+    switch (isolate_->current_vm_state()) {
+      case GC:
+        name = "(GC)";
+        break;
+      case COMPILER:
+        name = "(COMPILER)";
+        break;
+      case OTHER:
+        name = "(V8 API)";
+        break;
+      case EXTERNAL:
+        name = "(EXTERNAL)";
+        break;
+      case IDLE:
+        name = "(IDLE)";
+        break;
+      case JS:
+        name = "(JS)";
+        break;
+    }
+    return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+  }
+
+  // We need to process the stack in reverse order as the top of the stack is
+  // the first element in the list.
+  for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
+    SharedFunctionInfo* shared = *it;
+    const char* name = this->names()->GetFunctionName(shared->DebugName());
+    int script_id = v8::UnboundScript::kNoScriptId;
+    if (shared->script()->IsScript()) {
+      Script* script = Script::cast(shared->script());
+      script_id = script->id();
+    }
+    node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+  }
+  return node;
+}
+
+v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
+    AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+    const std::map<int, Script*>& scripts) {
+  Local<v8::String> script_name =
+      ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
+  int line = v8::AllocationProfile::kNoLineNumberInfo;
+  int column = v8::AllocationProfile::kNoColumnNumberInfo;
+  std::vector<v8::AllocationProfile::Allocation> allocations;
+  allocations.reserve(node->allocations_.size());
+  if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+    // Cannot use std::map<T>::at because it is not available on Android.
+    auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
+    Script* script = non_const_scripts[node->script_id_];
+    if (script->name()->IsName()) {
+      Name* name = Name::cast(script->name());
+      script_name = ToApiHandle<v8::String>(
+          isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+    }
+    Handle<Script> script_handle(script);
+
+    line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
+    column = 1 + Script::GetColumnNumber(script_handle, node->script_position_);
+    for (auto alloc : node->allocations_) {
+      allocations.push_back(ScaleSample(alloc.first, alloc.second));
+    }
+  }
+
+  profile->nodes().push_back(v8::AllocationProfile::Node(
+      {ToApiHandle<v8::String>(
+           isolate_->factory()->InternalizeUtf8String(node->name_)),
+       script_name, node->script_id_, node->script_position_, line, column,
+       std::vector<v8::AllocationProfile::Node*>(), allocations}));
+  v8::AllocationProfile::Node* current = &profile->nodes().back();
+  size_t child_len = node->children_.size();
+  // The children vector may have nodes appended to it during translation
+  // because the translation may allocate strings on the JS heap that have
+  // the potential to be sampled. We cache the length of the vector before
+  // iteration so that nodes appended to the vector during iteration are
+  // not processed.
+  for (size_t i = 0; i < child_len; i++) {
+    current->children.push_back(
+        TranslateAllocationNode(profile, node->children_[i], scripts));
+  }
+  return current;
+}
+
+v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+  // To resolve positions to line/column numbers, we will need to look up
+  // scripts. Build a map to allow fast mapping from script id to script.
+  std::map<int, Script*> scripts;
+  {
+    Script::Iterator iterator(isolate_);
+    Script* script;
+    while ((script = iterator.Next())) {
+      scripts[script->id()] = script;
+    }
+  }
+
+  auto profile = new v8::internal::AllocationProfile();
+
+  TranslateAllocationNode(profile, &profile_root_, scripts);
+
+  return profile;
+}
+
+
+}  // namespace internal
+}  // namespace v8
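
A quick sanity check of the sampling math above, with hypothetical numbers: at rate_ = 512 KiB, a 64-byte allocation is sampled with probability 1 - e^(-64/524288), roughly 1.22e-4, so a single observed sample scales to about 8192 allocations; for sizes much larger than the rate the probability approaches 1 and counts pass through nearly unscaled. Standalone arithmetic mirroring ScaleSample():

  #include <cmath>
  double rate = 512.0 * 1024.0;             // average bytes between samples
  double p = 1.0 - std::exp(-64.0 / rate);  // ~1.22e-4
  unsigned estimate =
      static_cast<unsigned>(1 * (1.0 / p) + 0.5);  // ~8193 allocations
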
diff --git a/src/profiler/sampling-heap-profiler.h b/src/profiler/sampling-heap-profiler.h
new file mode 100644
index 0000000..0b538b0
--- /dev/null
+++ b/src/profiler/sampling-heap-profiler.h
@@ -0,0 +1,166 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+#define V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+
+#include <deque>
+#include <map>
+#include <set>
+#include "include/v8-profiler.h"
+#include "src/heap/heap.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
+namespace internal {
+
+class SamplingAllocationObserver;
+
+class AllocationProfile : public v8::AllocationProfile {
+ public:
+  AllocationProfile() : nodes_() {}
+
+  v8::AllocationProfile::Node* GetRootNode() override {
+    return nodes_.size() == 0 ? nullptr : &nodes_.front();
+  }
+
+  std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
+
+ private:
+  std::deque<v8::AllocationProfile::Node> nodes_;
+
+  DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
+};
+
+class SamplingHeapProfiler {
+ public:
+  SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
+                       int stack_depth);
+  ~SamplingHeapProfiler();
+
+  v8::AllocationProfile* GetAllocationProfile();
+
+  StringsStorage* names() const { return names_; }
+
+  class AllocationNode;
+
+  struct Sample {
+   public:
+    Sample(size_t size_, AllocationNode* owner_, Local<Value> local_,
+           SamplingHeapProfiler* profiler_)
+        : size(size_),
+          owner(owner_),
+          global(Global<Value>(
+              reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
+          profiler(profiler_) {}
+    ~Sample() { global.Reset(); }
+    const size_t size;
+    AllocationNode* const owner;
+    Global<Value> global;
+    SamplingHeapProfiler* const profiler;
+
+   private:
+    DISALLOW_COPY_AND_ASSIGN(Sample);
+  };
+
+  class AllocationNode {
+   public:
+    AllocationNode(const char* const name, int script_id,
+                   const int start_position)
+        : script_id_(script_id),
+          script_position_(start_position),
+          name_(name) {}
+    ~AllocationNode() {
+      for (auto child : children_) {
+        delete child;
+      }
+    }
+
+   private:
+    std::map<size_t, unsigned int> allocations_;
+    std::vector<AllocationNode*> children_;
+    const int script_id_;
+    const int script_position_;
+    const char* const name_;
+
+    friend class SamplingHeapProfiler;
+
+    DISALLOW_COPY_AND_ASSIGN(AllocationNode);
+  };
+
+ private:
+  Heap* heap() const { return heap_; }
+
+  void SampleObject(Address soon_object, size_t size);
+
+  static void OnWeakCallback(const WeakCallbackInfo<Sample>& data);
+
+  // Methods that construct v8::AllocationProfile.
+
+  // Translates the provided AllocationNode *node*, returning an equivalent
+  // AllocationProfile::Node. The newly created AllocationProfile::Node is added
+  // to the provided AllocationProfile *profile*. Line numbers, column numbers,
+  // and script names are resolved using *scripts* which maps all currently
+  // loaded scripts keyed by their script id.
+  v8::AllocationProfile::Node* TranslateAllocationNode(
+      AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+      const std::map<int, Script*>& scripts);
+  v8::AllocationProfile::Allocation ScaleSample(size_t size,
+                                                unsigned int count);
+  AllocationNode* AddStack();
+  AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
+                                     int script_id, int start_position);
+
+  Isolate* const isolate_;
+  Heap* const heap_;
+  base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
+  base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
+  StringsStorage* const names_;
+  AllocationNode profile_root_;
+  std::set<Sample*> samples_;
+  const int stack_depth_;
+  const uint64_t rate_;
+
+  friend class SamplingAllocationObserver;
+};
+
+class SamplingAllocationObserver : public AllocationObserver {
+ public:
+  SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
+                             SamplingHeapProfiler* profiler,
+                             base::RandomNumberGenerator* random)
+      : AllocationObserver(step_size),
+        profiler_(profiler),
+        heap_(heap),
+        random_(random),
+        rate_(rate) {}
+  virtual ~SamplingAllocationObserver() {}
+
+ protected:
+  void Step(int bytes_allocated, Address soon_object, size_t size) override {
+    USE(heap_);
+    DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+    DCHECK(soon_object);
+    profiler_->SampleObject(soon_object, size);
+  }
+
+  intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
+
+ private:
+  intptr_t GetNextSampleInterval(uint64_t rate);
+  SamplingHeapProfiler* const profiler_;
+  Heap* const heap_;
+  base::RandomNumberGenerator* const random_;
+  uint64_t const rate_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
diff --git a/src/property-descriptor.cc b/src/property-descriptor.cc
index 243a9fa..750f948 100644
--- a/src/property-descriptor.cc
+++ b/src/property-descriptor.cc
@@ -13,6 +13,8 @@
 namespace v8 {
 namespace internal {
 
+namespace {
+
 // Helper function for ToPropertyDescriptor. Comments describe steps for
 // "enumerable", other properties are handled the same way.
 // Returns false if an exception was thrown.
@@ -101,19 +103,51 @@
 }
 
 
-static void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
-                               Handle<String> name, Handle<Object> value) {
+void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
+                        Handle<String> name, Handle<Object> value) {
   LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
   Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
   CHECK(result.IsJust() && result.FromJust());
 }
 
+}  // namespace
+
 
 // ES6 6.2.4.4 "FromPropertyDescriptor"
 Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
   DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
            PropertyDescriptor::IsDataDescriptor(this)));
   Factory* factory = isolate->factory();
+  if (IsRegularAccessorProperty()) {
+    // Fast case for regular accessor properties.
+    Handle<JSObject> result = factory->NewJSObjectFromMap(
+        isolate->accessor_property_descriptor_map());
+    result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kGetIndex,
+                                  *get());
+    result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kSetIndex,
+                                  *set());
+    result->InObjectPropertyAtPut(
+        JSAccessorPropertyDescriptor::kEnumerableIndex,
+        isolate->heap()->ToBoolean(enumerable()));
+    result->InObjectPropertyAtPut(
+        JSAccessorPropertyDescriptor::kConfigurableIndex,
+        isolate->heap()->ToBoolean(configurable()));
+    return result;
+  }
+  if (IsRegularDataProperty()) {
+    // Fast case for regular data properties.
+    Handle<JSObject> result =
+        factory->NewJSObjectFromMap(isolate->data_property_descriptor_map());
+    result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kValueIndex,
+                                  *value());
+    result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kWritableIndex,
+                                  isolate->heap()->ToBoolean(writable()));
+    result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kEnumerableIndex,
+                                  isolate->heap()->ToBoolean(enumerable()));
+    result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kConfigurableIndex,
+                                  isolate->heap()->ToBoolean(configurable()));
+    return result;
+  }
   Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (has_value()) {
     CreateDataProperty(isolate, result, factory->value_string(), value());
diff --git a/src/property-descriptor.h b/src/property-descriptor.h
index 5fbbfa3..cba43ed 100644
--- a/src/property-descriptor.h
+++ b/src/property-descriptor.h
@@ -57,6 +57,16 @@
            !has_value() && !has_get() && !has_set();
   }
 
+  bool IsRegularAccessorProperty() const {
+    return has_configurable() && has_enumerable() && !has_value() &&
+           !has_writable() && has_get() && has_set();
+  }
+
+  bool IsRegularDataProperty() const {
+    return has_configurable() && has_enumerable() && has_value() &&
+           has_writable() && !has_get() && !has_set();
+  }
+
   bool enumerable() const { return enumerable_; }
   void set_enumerable(bool enumerable) {
     enumerable_ = enumerable;
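
The two new predicates gate the fast paths added to ToObject() in property-descriptor.cc above: a descriptor qualifies only when every field of one shape is present and no field of the other shape is. A hypothetical example of a descriptor taking the data fast path (setters other than set_enumerable() are assumed to mirror it):

  PropertyDescriptor desc;
  desc.set_value(value);         // assumed setter
  desc.set_writable(true);       // assumed setter
  desc.set_enumerable(true);
  desc.set_configurable(false);  // assumed setter
  // IsRegularDataProperty() holds, so ToObject() fills the four in-object
  // fields from data_property_descriptor_map instead of adding properties
  // one at a time.
  Handle<Object> obj = desc.ToObject(isolate);
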
diff --git a/src/property-details.h b/src/property-details.h
index 44f32cb..fdf2c6c 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -56,9 +56,7 @@
 
 
 class Smi;
-template<class> class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
+class Type;
 class TypeInfo;
 
 // Type of properties.
@@ -332,6 +330,7 @@
   bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
   bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
   bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
+  bool IsEnumerable() const { return !IsDontEnum(); }
   PropertyCellType cell_type() const {
     return PropertyCellTypeField::decode(value_);
   }
diff --git a/src/property.cc b/src/property.cc
index 93ba43c..a4e0d67 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -4,6 +4,7 @@
 
 #include "src/property.h"
 
+#include "src/field-type.h"
 #include "src/handles-inl.h"
 #include "src/ostreams.h"
 
@@ -20,6 +21,11 @@
   return os;
 }
 
+DataDescriptor::DataDescriptor(Handle<Name> key, int field_index,
+                               PropertyAttributes attributes,
+                               Representation representation)
+    : Descriptor(key, FieldType::Any(key->GetIsolate()), attributes, DATA,
+                 representation, field_index) {}
 
 struct FastPropertyDetails {
   explicit FastPropertyDetails(const PropertyDetails& v) : details(v) {}
diff --git a/src/property.h b/src/property.h
index b58c9c6..add9e4d 100644
--- a/src/property.h
+++ b/src/property.h
@@ -8,9 +8,7 @@
 #include <iosfwd>
 
 #include "src/factory.h"
-#include "src/field-index.h"
 #include "src/isolate.h"
-#include "src/types.h"
 
 namespace v8 {
 namespace internal {
@@ -22,13 +20,6 @@
 // optionally a piece of data.
 class Descriptor BASE_EMBEDDED {
  public:
-  void KeyToUniqueName() {
-    if (!key_->IsUniqueName()) {
-      key_ = key_->GetIsolate()->factory()->InternalizeString(
-          Handle<String>::cast(key_));
-    }
-  }
-
   Handle<Name> GetKey() const { return key_; }
   Handle<Object> GetValue() const { return value_; }
   PropertyDetails GetDetails() const { return details_; }
@@ -44,25 +35,25 @@
   Descriptor() : details_(Smi::FromInt(0)) {}
 
   void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+    DCHECK(key->IsUniqueName());
     key_ = key;
     value_ = value;
     details_ = details;
   }
 
   Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details)
-      : key_(key),
-        value_(value),
-        details_(details) { }
+      : key_(key), value_(value), details_(details) {
+    DCHECK(key->IsUniqueName());
+  }
 
-  Descriptor(Handle<Name> key,
-             Handle<Object> value,
-             PropertyAttributes attributes,
-             PropertyType type,
-             Representation representation,
-             int field_index = 0)
+  Descriptor(Handle<Name> key, Handle<Object> value,
+             PropertyAttributes attributes, PropertyType type,
+             Representation representation, int field_index = 0)
       : key_(key),
         value_(value),
-        details_(attributes, type, representation, field_index) { }
+        details_(attributes, type, representation, field_index) {
+    DCHECK(key->IsUniqueName());
+  }
 
   friend class DescriptorArray;
   friend class Map;
@@ -75,9 +66,7 @@
 class DataDescriptor final : public Descriptor {
  public:
   DataDescriptor(Handle<Name> key, int field_index,
-                 PropertyAttributes attributes, Representation representation)
-      : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes, DATA,
-                   representation, field_index) {}
+                 PropertyAttributes attributes, Representation representation);
   // The field type is either a simple type or a map wrapped in a weak cell.
   DataDescriptor(Handle<Name> key, int field_index,
                  Handle<Object> wrapped_field_type,
diff --git a/src/prototype.h b/src/prototype.h
index 3253791..c5e9545 100644
--- a/src/prototype.h
+++ b/src/prototype.h
@@ -22,6 +22,7 @@
  * The PrototypeIterator can either run to the null_value(), the first
  * non-hidden prototype, or a given object.
  */
+
 class PrototypeIterator {
  public:
   enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE };
@@ -30,40 +31,42 @@
 
   const int kProxyPrototypeLimit = 100 * 1000;
 
-  PrototypeIterator(Isolate* isolate, Handle<Object> receiver,
-                    WhereToStart where_to_start = START_AT_PROTOTYPE)
-      : did_jump_to_prototype_chain_(false),
-        object_(NULL),
+  PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+                    WhereToStart where_to_start = START_AT_PROTOTYPE,
+                    WhereToEnd where_to_end = END_AT_NULL)
+      : object_(NULL),
         handle_(receiver),
         isolate_(isolate),
+        where_to_end_(where_to_end),
+        is_at_end_(false),
         seen_proxies_(0) {
     CHECK(!handle_.is_null());
-    if (where_to_start == START_AT_PROTOTYPE) {
-      Advance();
-    }
+    if (where_to_start == START_AT_PROTOTYPE) Advance();
   }
 
-  PrototypeIterator(Isolate* isolate, Object* receiver,
-                    WhereToStart where_to_start = START_AT_PROTOTYPE)
-      : did_jump_to_prototype_chain_(false),
-        object_(receiver),
+  PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+                    WhereToStart where_to_start = START_AT_PROTOTYPE,
+                    WhereToEnd where_to_end = END_AT_NULL)
+      : object_(receiver),
         isolate_(isolate),
+        where_to_end_(where_to_end),
+        is_at_end_(false),
         seen_proxies_(0) {
-    if (where_to_start == START_AT_PROTOTYPE) {
-      Advance();
-    }
+    if (where_to_start == START_AT_PROTOTYPE) Advance();
   }
 
   explicit PrototypeIterator(Map* receiver_map)
-      : did_jump_to_prototype_chain_(true),
-        object_(receiver_map->prototype()),
-        isolate_(receiver_map->GetIsolate()) {}
+      : object_(receiver_map->prototype()),
+        isolate_(receiver_map->GetIsolate()),
+        where_to_end_(END_AT_NULL),
+        is_at_end_(object_->IsNull()) {}
 
   explicit PrototypeIterator(Handle<Map> receiver_map)
-      : did_jump_to_prototype_chain_(true),
-        object_(NULL),
+      : object_(NULL),
         handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
-        isolate_(receiver_map->GetIsolate()) {}
+        isolate_(receiver_map->GetIsolate()),
+        where_to_end_(END_AT_NULL),
+        is_at_end_(handle_->IsNull()) {}
 
   ~PrototypeIterator() {}
 
@@ -93,32 +96,30 @@
 
   void Advance() {
     if (handle_.is_null() && object_->IsJSProxy()) {
-      did_jump_to_prototype_chain_ = true;
+      is_at_end_ = true;
       object_ = isolate_->heap()->null_value();
       return;
     } else if (!handle_.is_null() && handle_->IsJSProxy()) {
-      did_jump_to_prototype_chain_ = true;
-      handle_ = handle(isolate_->heap()->null_value(), isolate_);
+      is_at_end_ = true;
+      handle_ = isolate_->factory()->null_value();
       return;
     }
     AdvanceIgnoringProxies();
   }
 
   void AdvanceIgnoringProxies() {
-    if (!did_jump_to_prototype_chain_) {
-      did_jump_to_prototype_chain_ = true;
-      if (handle_.is_null()) {
-        object_ = object_->GetRootMap(isolate_)->prototype();
-      } else {
-        handle_ = handle(handle_->GetRootMap(isolate_)->prototype(), isolate_);
-      }
+    Object* object = handle_.is_null() ? object_ : *handle_;
+    Map* map = HeapObject::cast(object)->map();
+
+    Object* prototype = map->prototype();
+    is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN
+                     ? !map->has_hidden_prototype()
+                     : prototype->IsNull();
+
+    if (handle_.is_null()) {
+      object_ = prototype;
     } else {
-      if (handle_.is_null()) {
-        object_ = HeapObject::cast(object_)->map()->prototype();
-      } else {
-        handle_ =
-            handle(HeapObject::cast(*handle_)->map()->prototype(), isolate_);
-      }
+      handle_ = handle(prototype, isolate_);
     }
   }
 
@@ -129,6 +130,7 @@
     if (!HasAccess()) {
       // Abort the lookup if we do not have access to the current object.
       handle_ = isolate_->factory()->null_value();
+      is_at_end_ = true;
       return true;
     }
     if (handle_.is_null() || !handle_->IsJSProxy()) {
@@ -143,41 +145,21 @@
           *isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
       return false;
     }
-    did_jump_to_prototype_chain_ = true;
     MaybeHandle<Object> proto =
         JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
-    return proto.ToHandle(&handle_);
+    if (!proto.ToHandle(&handle_)) return false;
+    is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull();
+    return true;
   }
 
-  bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
-    if (handle_.is_null()) {
-      return object_->IsNull() ||
-             (did_jump_to_prototype_chain_ &&
-              where_to_end == END_AT_NON_HIDDEN &&
-              !HeapObject::cast(object_)->map()->is_hidden_prototype());
-    } else {
-      return handle_->IsNull() ||
-             (did_jump_to_prototype_chain_ &&
-              where_to_end == END_AT_NON_HIDDEN &&
-              !Handle<HeapObject>::cast(handle_)->map()->is_hidden_prototype());
-    }
-  }
-
-  bool IsAtEnd(Object* final_object) {
-    DCHECK(handle_.is_null());
-    return object_->IsNull() || object_ == final_object;
-  }
-
-  bool IsAtEnd(Handle<Object> final_object) {
-    DCHECK(!handle_.is_null());
-    return handle_->IsNull() || *handle_ == *final_object;
-  }
+  bool IsAtEnd() const { return is_at_end_; }
 
  private:
-  bool did_jump_to_prototype_chain_;
   Object* object_;
   Handle<Object> handle_;
   Isolate* isolate_;
+  WhereToEnd where_to_end_;
+  bool is_at_end_;
   int seen_proxies_;
 
   DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
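
With this rework the end condition is chosen once at construction (where_to_end_) and cached in is_at_end_, so IsAtEnd() becomes a plain accessor instead of recomputing the predicate on every call. A sketch of the resulting walking idiom (GetCurrent() is the existing accessor declared elsewhere in this header):

  for (PrototypeIterator iter(isolate, receiver,
                              PrototypeIterator::START_AT_PROTOTYPE,
                              PrototypeIterator::END_AT_NON_HIDDEN);
       !iter.IsAtEnd(); iter.Advance()) {
    // ... inspect PrototypeIterator::GetCurrent(iter) ...
  }
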
diff --git a/src/regexp/arm/regexp-macro-assembler-arm.cc b/src/regexp/arm/regexp-macro-assembler-arm.cc
index 6fafdfb..ce72188 100644
--- a/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -210,7 +210,7 @@
 
 
 void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ ldr(r0, register_location(start_reg));  // Index of start of capture
   __ ldr(r1, register_location(start_reg + 1));  // Index of end of capture
@@ -302,7 +302,7 @@
     //   r0: Address byte_offset1 - Address captured substring's start.
     //   r1: Address byte_offset2 - Address of current character position.
     //   r2: size_t byte_length - length of capture in bytes(!)
-    //   r3: Isolate* isolate
+    //   r3: Isolate* isolate, or 0 if the unicode flag is set.
 
     // Address of start of capture.
     __ add(r0, r0, Operand(end_of_input_address()));
@@ -316,7 +316,14 @@
       __ sub(r1, r1, r4);
     }
     // Isolate.
-    __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ mov(r3, Operand(0));
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+    }
 
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -798,9 +805,12 @@
         __ cmp(current_input_offset(), Operand::Zero());
         __ b(eq, &exit_label_);
         // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
         __ add(current_input_offset(),
                current_input_offset(),
                Operand((mode_ == UC16) ? 2 : 1));
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
       }
 
       __ b(&load_char_start_regexp);
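
The new bool threads the regexp's /u flag down to this native comparator call: with i18n support compiled in, passing 0 in the isolate register is the signal for the C++ helper to use unicode case folding instead of the isolate's canonicalization tables. Assumed shape of the receiving side (the helper itself is outside this patch):

  // Sketch (assumption): mode selection in the case-insensitive comparator.
  int CaseInsensitiveCompareUC16(Address substring_start,
                                 Address current_position,
                                 size_t byte_length, Isolate* isolate) {
    if (isolate == nullptr) {
      // /u mode: full unicode case folding (requires V8_I18N_SUPPORT).
      return 1;  // placeholder; 1 is assumed to mean "match"
    }
    // Legacy mode: the isolate's canonicalization cache.
    return 1;  // placeholder
  }
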
diff --git a/src/regexp/arm/regexp-macro-assembler-arm.h b/src/regexp/arm/regexp-macro-assembler-arm.h
index 233a98f..f808538 100644
--- a/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -38,7 +38,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 9948597..941ccea 100644
--- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -274,7 +274,7 @@
 
 
 void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
 
   Register capture_start_offset = w10;
@@ -388,7 +388,7 @@
     //   x0: Address byte_offset1 - Address captured substring's start.
     //   x1: Address byte_offset2 - Address of current character position.
     //   w2: size_t byte_length - length of capture in bytes(!)
-    //   x3: Isolate* isolate
+    //   x3: Isolate* isolate, or 0 if the unicode flag is set
 
     // Address of start of capture.
     __ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
@@ -400,7 +400,14 @@
       __ Sub(x1, x1, Operand(capture_length, SXTW));
     }
     // Isolate.
-    __ Mov(x3, ExternalReference::isolate_address(isolate()));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ Mov(x3, Operand(0));
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ Mov(x3, ExternalReference::isolate_address(isolate()));
+    }
 
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -991,9 +998,12 @@
         // Offset from the end is zero if we already reached the end.
         __ Cbz(current_input_offset(), &return_w0);
         // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
         __ Add(current_input_offset(),
                current_input_offset(),
                Operand((mode_ == UC16) ? 2 : 1));
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
       }
 
       __ B(&load_char_start_regexp);
diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.h b/src/regexp/arm64/regexp-macro-assembler-arm64.h
index d71f063..69624f6 100644
--- a/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -43,7 +43,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/src/regexp/bytecodes-irregexp.h b/src/regexp/bytecodes-irregexp.h
index 2dbfbc0..3848f15 100644
--- a/src/regexp/bytecodes-irregexp.h
+++ b/src/regexp/bytecodes-irregexp.h
@@ -6,6 +6,8 @@
 #ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
 #define V8_REGEXP_BYTECODES_IRREGEXP_H_
 
+#ifdef V8_INTERPRETED_REGEXP
+
 namespace v8 {
 namespace internal {
 
@@ -18,56 +20,58 @@
 const int BYTECODE_SHIFT = 8;
 
 #define BYTECODE_ITERATOR(V)                                                   \
-V(BREAK,              0, 4)   /* bc8                                        */ \
-V(PUSH_CP,            1, 4)   /* bc8 pad24                                  */ \
-V(PUSH_BT,            2, 8)   /* bc8 pad24 offset32                         */ \
-V(PUSH_REGISTER,      3, 4)   /* bc8 reg_idx24                              */ \
-V(SET_REGISTER_TO_CP, 4, 8)   /* bc8 reg_idx24 offset32                     */ \
-V(SET_CP_TO_REGISTER, 5, 4)   /* bc8 reg_idx24                              */ \
-V(SET_REGISTER_TO_SP, 6, 4)   /* bc8 reg_idx24                              */ \
-V(SET_SP_TO_REGISTER, 7, 4)   /* bc8 reg_idx24                              */ \
-V(SET_REGISTER,       8, 8)   /* bc8 reg_idx24 value32                      */ \
-V(ADVANCE_REGISTER,   9, 8)   /* bc8 reg_idx24 value32                      */ \
-V(POP_CP,            10, 4)   /* bc8 pad24                                  */ \
-V(POP_BT,            11, 4)   /* bc8 pad24                                  */ \
-V(POP_REGISTER,      12, 4)   /* bc8 reg_idx24                              */ \
-V(FAIL,              13, 4)   /* bc8 pad24                                  */ \
-V(SUCCEED,           14, 4)   /* bc8 pad24                                  */ \
-V(ADVANCE_CP,        15, 4)   /* bc8 offset24                               */ \
-V(GOTO,              16, 8)   /* bc8 pad24 addr32                           */ \
-V(LOAD_CURRENT_CHAR, 17, 8)   /* bc8 offset24 addr32                        */ \
-V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24                       */ \
-V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32                       */ \
-V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24                    */ \
-V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32                       */ \
-V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24                    */ \
-V(CHECK_4_CHARS,     23, 12)  /* bc8 pad24 uint32 addr32                    */ \
-V(CHECK_CHAR,        24, 8)   /* bc8 pad8 uint16 addr32                     */ \
-V(CHECK_NOT_4_CHARS, 25, 12)  /* bc8 pad24 uint32 addr32                    */ \
-V(CHECK_NOT_CHAR,    26, 8)   /* bc8 pad8 uint16 addr32                     */ \
-V(AND_CHECK_4_CHARS, 27, 16)  /* bc8 pad24 uint32 uint32 addr32             */ \
-V(AND_CHECK_CHAR,    28, 12)  /* bc8 pad8 uint16 uint32 addr32              */ \
-V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32          */ \
-V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32              */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32       */ \
-V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32                */ \
-V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32            */ \
-V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128                   */ \
-V(CHECK_LT,          35, 8)   /* bc8 pad8 uc16 addr32                       */ \
-V(CHECK_GT,          36, 8)   /* bc8 pad8 uc16 addr32                       */ \
-V(CHECK_NOT_BACK_REF, 37, 8)  /* bc8 reg_idx24 addr32                       */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32                */ \
-V(CHECK_NOT_BACK_REF_BACKWARD, 39, 8)         /* bc8 reg_idx24 addr32       */ \
-V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32       */ \
-V(CHECK_NOT_REGS_EQUAL, 41, 12) /* bc8 regidx24 reg_idx32 addr32            */ \
-V(CHECK_REGISTER_LT, 42, 12)  /* bc8 reg_idx24 value32 addr32               */ \
-V(CHECK_REGISTER_GE, 43, 12)  /* bc8 reg_idx24 value32 addr32               */ \
-V(CHECK_REGISTER_EQ_POS, 44, 8) /* bc8 reg_idx24 addr32                     */ \
-V(CHECK_AT_START,    45, 8)   /* bc8 pad24 addr32                           */ \
-V(CHECK_NOT_AT_START, 46, 8)  /* bc8 offset24 addr32                        */ \
-V(CHECK_GREEDY,      47, 8)   /* bc8 pad24 addr32                           */ \
-V(ADVANCE_CP_AND_GOTO, 48, 8) /* bc8 offset24 addr32                        */ \
-V(SET_CURRENT_POSITION_FROM_END, 49, 4) /* bc8 idx24                        */
+  V(BREAK, 0, 4)              /* bc8                                        */ \
+  V(PUSH_CP, 1, 4)            /* bc8 pad24                                  */ \
+  V(PUSH_BT, 2, 8)            /* bc8 pad24 offset32                         */ \
+  V(PUSH_REGISTER, 3, 4)      /* bc8 reg_idx24                              */ \
+  V(SET_REGISTER_TO_CP, 4, 8) /* bc8 reg_idx24 offset32                     */ \
+  V(SET_CP_TO_REGISTER, 5, 4) /* bc8 reg_idx24                              */ \
+  V(SET_REGISTER_TO_SP, 6, 4) /* bc8 reg_idx24                              */ \
+  V(SET_SP_TO_REGISTER, 7, 4) /* bc8 reg_idx24                              */ \
+  V(SET_REGISTER, 8, 8)       /* bc8 reg_idx24 value32                      */ \
+  V(ADVANCE_REGISTER, 9, 8)   /* bc8 reg_idx24 value32                      */ \
+  V(POP_CP, 10, 4)            /* bc8 pad24                                  */ \
+  V(POP_BT, 11, 4)            /* bc8 pad24                                  */ \
+  V(POP_REGISTER, 12, 4)      /* bc8 reg_idx24                              */ \
+  V(FAIL, 13, 4)              /* bc8 pad24                                  */ \
+  V(SUCCEED, 14, 4)           /* bc8 pad24                                  */ \
+  V(ADVANCE_CP, 15, 4)        /* bc8 offset24                               */ \
+  V(GOTO, 16, 8)              /* bc8 pad24 addr32                           */ \
+  V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32                        */ \
+  V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4)    /* bc8 offset24 */                  \
+  V(LOAD_2_CURRENT_CHARS, 19, 8)           /* bc8 offset24 addr32 */           \
+  V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */                  \
+  V(LOAD_4_CURRENT_CHARS, 21, 8)           /* bc8 offset24 addr32 */           \
+  V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */                  \
+  V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32                    */    \
+  V(CHECK_CHAR, 24, 8)     /* bc8 pad8 uint16 addr32                     */    \
+  V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */                   \
+  V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32                     */    \
+  V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */            \
+  V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32              */   \
+  V(AND_CHECK_NOT_4_CHARS, 29, 16)    /* bc8 pad24 uint32 uint32 addr32 */     \
+  V(AND_CHECK_NOT_CHAR, 30, 12)       /* bc8 pad8 uint16 uint32 addr32 */      \
+  V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */     \
+  V(CHECK_CHAR_IN_RANGE, 32, 12)      /* bc8 pad24 uc16 uc16 addr32 */         \
+  V(CHECK_CHAR_NOT_IN_RANGE, 33, 12)  /* bc8 pad24 uc16 uc16 addr32 */         \
+  V(CHECK_BIT_IN_TABLE, 34, 24)       /* bc8 pad24 addr32 bits128 */           \
+  V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32                       */          \
+  V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32                       */          \
+  V(CHECK_NOT_BACK_REF, 37, 8)         /* bc8 reg_idx24 addr32 */              \
+  V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */              \
+  V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) /* bc8 reg_idx24 addr32 */     \
+  V(CHECK_NOT_BACK_REF_BACKWARD, 40, 8)         /* bc8 reg_idx24 addr32 */     \
+  V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 41, 8) /* bc8 reg_idx24 addr32 */     \
+  V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) /* bc8 reg_idx24 addr32 */ \
+  V(CHECK_NOT_REGS_EQUAL, 43, 12) /* bc8 regidx24 reg_idx32 addr32 */          \
+  V(CHECK_REGISTER_LT, 44, 12)    /* bc8 reg_idx24 value32 addr32 */           \
+  V(CHECK_REGISTER_GE, 45, 12)    /* bc8 reg_idx24 value32 addr32 */           \
+  V(CHECK_REGISTER_EQ_POS, 46, 8) /* bc8 reg_idx24 addr32 */                   \
+  V(CHECK_AT_START, 47, 8) /* bc8 pad24 addr32                           */    \
+  V(CHECK_NOT_AT_START, 48, 8) /* bc8 offset24 addr32 */                       \
+  V(CHECK_GREEDY, 49, 8) /* bc8 pad24 addr32                           */      \
+  V(ADVANCE_CP_AND_GOTO, 50, 8)           /* bc8 offset24 addr32 */            \
+  V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */
 
 #define DECLARE_BYTECODES(name, code, length) \
   static const int BC_##name = code;
@@ -82,4 +86,6 @@
 }  // namespace internal
 }  // namespace v8
 
+#endif  // V8_INTERPRETED_REGEXP
+
 #endif  // V8_REGEXP_BYTECODES_IRREGEXP_H_
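
Each V(name, code, length) entry above declares an opcode, its numeric value,
and the instruction's total length in bytes; the trailing comment documents
the operand layout, with the 8-bit opcode and a 24-bit operand packed into
the first 32-bit word. A minimal encode/decode sketch, assuming
BYTECODE_MASK == 0xff and BYTECODE_SHIFT == 8 as the interpreter's
"insn & BYTECODE_MASK" / "insn >> BYTECODE_SHIFT" usage further below
suggests:

    #include <cstdint>

    const uint32_t kBytecodeMask = 0xff;  // assumed value of BYTECODE_MASK
    const int kBytecodeShift = 8;         // assumed value of BYTECODE_SHIFT

    // First 32-bit word of every instruction: bc8 | operand24.
    uint32_t EncodeFirstWord(uint32_t opcode, uint32_t operand24) {
      return opcode | (operand24 << kBytecodeShift);
    }
    uint32_t Opcode(uint32_t insn) { return insn & kBytecodeMask; }
    uint32_t Operand24(uint32_t insn) { return insn >> kBytecodeShift; }
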
diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6ef0f5f..4c22b43 100644
--- a/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -189,7 +189,7 @@
 
 
 void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
@@ -296,11 +296,18 @@
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
-    //   Isolate* isolate
+    //   Isolate* isolate, or 0 if the unicode flag is set.
 
     // Set isolate.
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ mov(Operand(esp, 3 * kPointerSize),
+             Immediate(ExternalReference::isolate_address(isolate())));
+    }
     // Set byte_length.
     __ mov(Operand(esp, 2 * kPointerSize), ebx);
     // Set byte_offset2.
@@ -822,13 +829,15 @@
         __ test(edi, edi);
         __ j(zero, &exit_label_, Label::kNear);
         // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
         if (mode_ == UC16) {
           __ add(edi, Immediate(2));
         } else {
           __ inc(edi);
         }
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
       }
-
       __ jmp(&load_char_start_regexp);
     } else {
       __ mov(eax, Immediate(SUCCESS));
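
The advance loop added above (bind(&advance) followed by
CheckNotInSurrogatePair(0, &advance)) re-runs the one-code-unit advance when
the new position falls between the two halves of a surrogate pair. A rough
C++ sketch of the equivalent logic for two-byte subjects, assuming
CheckNotInSurrogatePair branches back exactly when the current character is a
trail surrogate preceded by a lead surrogate:

    #include <cstdint>

    static bool IsLeadSurrogate(uint16_t c) { return (c & 0xFC00) == 0xD800; }
    static bool IsTrailSurrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }

    // Positions are code unit indices; the real code advances edi in bytes.
    int AdvanceAfterZeroLengthMatch(const uint16_t* subject, int length,
                                    int pos) {
      do {
        pos++;  // mode_ == UC16 adds 2 bytes; Latin1 increments by 1
      } while (pos > 0 && pos < length && IsTrailSurrogate(subject[pos]) &&
               IsLeadSurrogate(subject[pos - 1]));
      return pos;
    }
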
diff --git a/src/regexp/ia32/regexp-macro-assembler-ia32.h b/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 1ef87ee..fa17413 100644
--- a/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -37,7 +37,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/src/regexp/interpreter-irregexp.cc b/src/regexp/interpreter-irregexp.cc
index ea748e4..14834d5 100644
--- a/src/regexp/interpreter-irregexp.cc
+++ b/src/regexp/interpreter-irregexp.cc
@@ -4,6 +4,8 @@
 
 // A simple interpreter for the Irregexp byte code.
 
+#ifdef V8_INTERPRETED_REGEXP
+
 #include "src/regexp/interpreter-irregexp.h"
 
 #include "src/ast/ast.h"
@@ -13,38 +15,32 @@
 #include "src/unicode.h"
 #include "src/utils.h"
 
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uchar.h"
+#endif  // V8_I18N_SUPPORT
+
 namespace v8 {
 namespace internal {
 
-
 typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
 
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
-                                 int from,
-                                 int current,
-                                 int len,
-                                 Vector<const uc16> subject) {
-  for (int i = 0; i < len; i++) {
-    unibrow::uchar old_char = subject[from++];
-    unibrow::uchar new_char = subject[current++];
-    if (old_char == new_char) continue;
-    unibrow::uchar old_string[1] = { old_char };
-    unibrow::uchar new_string[1] = { new_char };
-    interp_canonicalize->get(old_char, '\0', old_string);
-    interp_canonicalize->get(new_char, '\0', new_string);
-    if (old_string[0] != new_string[0]) {
-      return false;
-    }
-  }
-  return true;
+static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
+                                 int len, Vector<const uc16> subject,
+                                 bool unicode) {
+  Address offset_a =
+      reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(from)));
+  Address offset_b =
+      reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(current)));
+  size_t length = len * kUC16Size;
+  return RegExpMacroAssembler::CaseInsensitiveCompareUC16(
+             offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
 }
 
 
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
-                                 int from,
-                                 int current,
-                                 int len,
-                                 Vector<const uint8_t> subject) {
+static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
+                                 int len, Vector<const uint8_t> subject,
+                                 bool unicode) {
+  // For Latin1 characters the unicode flag makes no difference.
   for (int i = 0; i < len; i++) {
     unsigned int old_char = subject[from++];
     unsigned int new_char = subject[current++];
@@ -522,13 +518,16 @@
         pc += BC_CHECK_NOT_BACK_REF_BACKWARD_LENGTH;
         break;
       }
+      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE)
       BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
+        bool unicode =
+            (insn & BYTECODE_MASK) == BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE;
         int from = registers[insn >> BYTECODE_SHIFT];
         int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
         if (from >= 0 && len > 0) {
           if (current + len > subject.length() ||
-              !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
-                                    from, current, len, subject)) {
+              !BackRefMatchesNoCase(isolate, from, current, len, subject,
+                                    unicode)) {
             pc = code_base + Load32Aligned(pc + 4);
             break;
           }
@@ -537,13 +536,16 @@
         pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
         break;
       }
+      BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD)
       BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
+        bool unicode = (insn & BYTECODE_MASK) ==
+                       BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD;
         int from = registers[insn >> BYTECODE_SHIFT];
         int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
         if (from >= 0 && len > 0) {
           if (current - len < 0 ||
-              !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
-                                    from, current - len, len, subject)) {
+              !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+                                    unicode)) {
             pc = code_base + Load32Aligned(pc + 4);
             break;
           }
@@ -619,3 +621,5 @@
 
 }  // namespace internal
 }  // namespace v8
+
+#endif  // V8_INTERPRETED_REGEXP
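
Both back-reference bytecodes now dispatch through
RegExpMacroAssembler::CaseInsensitiveCompareUC16, with a null Isolate*
standing in for the unicode flag, the same convention the ia32 code above
sets up by passing Immediate(0) instead of the isolate address. A minimal
sketch of that convention; the wrapper itself is hypothetical:

    // Hypothetical helper; only the CaseInsensitiveCompareUC16 call and the
    // "unicode ? nullptr : isolate" selection come from the patch.
    bool BackRefEqualIgnoreCase(Isolate* isolate, Address a, Address b,
                                size_t byte_length, bool unicode) {
      return RegExpMacroAssembler::CaseInsensitiveCompareUC16(
                 a, b, byte_length, unicode ? nullptr : isolate) == 1;
    }
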
diff --git a/src/regexp/interpreter-irregexp.h b/src/regexp/interpreter-irregexp.h
index 244af99..887fab6 100644
--- a/src/regexp/interpreter-irregexp.h
+++ b/src/regexp/interpreter-irregexp.h
@@ -7,12 +7,13 @@
 #ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
 #define V8_REGEXP_INTERPRETER_IRREGEXP_H_
 
+#ifdef V8_INTERPRETED_REGEXP
+
 #include "src/regexp/jsregexp.h"
 
 namespace v8 {
 namespace internal {
 
-
 class IrregexpInterpreter {
  public:
   static RegExpImpl::IrregexpResult Match(Isolate* isolate,
@@ -26,4 +27,6 @@
 }  // namespace internal
 }  // namespace v8
 
+#endif  // V8_INTERPRETED_REGEXP
+
 #endif  // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/src/regexp/jsregexp-inl.h b/src/regexp/jsregexp-inl.h
index 3eb7c3c..ca7a9fe 100644
--- a/src/regexp/jsregexp-inl.h
+++ b/src/regexp/jsregexp-inl.h
@@ -47,7 +47,10 @@
                                              register_array_size_);
     } else {
       int last_start_index = last_match[0];
-      if (last_start_index == last_end_index) last_end_index++;
+      if (last_start_index == last_end_index) {
+        // Zero-length match. Advance by one code point.
+        last_end_index = AdvanceZeroLength(last_end_index);
+      }
       if (last_end_index > subject_->length()) {
         num_matches_ = 0;  // Signal failed match.
         return NULL;
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index 34d20fe..80f48ca 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -25,6 +25,11 @@
 #include "src/string-search.h"
 #include "src/unicode-decoder.h"
 
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uset.h"
+#include "unicode/utypes.h"
+#endif  // V8_I18N_SUPPORT
+
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
 #include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
@@ -72,7 +77,7 @@
                             int ranges_length,
                             Interval new_range) {
   DCHECK((ranges_length & 1) == 1);
-  DCHECK(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
+  DCHECK(ranges[ranges_length - 1] == String::kMaxCodePoint + 1);
   if (containment == kLatticeUnknown) return containment;
   bool inside = false;
   int last = 0;
@@ -145,9 +150,8 @@
   PostponeInterruptsScope postpone(isolate);
   RegExpCompileData parse_result;
   FlatStringReader reader(isolate, pattern);
-  if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader,
-                                 flags & JSRegExp::kMultiline,
-                                 flags & JSRegExp::kUnicode, &parse_result)) {
+  if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader, flags,
+                                 &parse_result)) {
     // Throw an exception if we fail to parse the pattern.
     return ThrowRegExpException(re, pattern, parse_result.error);
   }
@@ -371,18 +375,16 @@
   pattern = String::Flatten(pattern);
   RegExpCompileData compile_data;
   FlatStringReader reader(isolate, pattern);
-  if (!RegExpParser::ParseRegExp(isolate, &zone, &reader,
-                                 flags & JSRegExp::kMultiline,
-                                 flags & JSRegExp::kUnicode, &compile_data)) {
+  if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+                                 &compile_data)) {
     // Throw an exception if we fail to parse the pattern.
     // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
     USE(ThrowRegExpException(re, pattern, compile_data.error));
     return false;
   }
-  RegExpEngine::CompilationResult result = RegExpEngine::Compile(
-      isolate, &zone, &compile_data, flags & JSRegExp::kIgnoreCase,
-      flags & JSRegExp::kGlobal, flags & JSRegExp::kMultiline,
-      flags & JSRegExp::kSticky, pattern, sample_subject, is_one_byte);
+  RegExpEngine::CompilationResult result =
+      RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern,
+                            sample_subject, is_one_byte);
   if (result.error_message != NULL) {
     // Unable to compile regexp.
     Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
@@ -636,7 +638,6 @@
 
 RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
                                      Handle<String> subject,
-                                     bool is_global,
                                      Isolate* isolate)
   : register_array_(NULL),
     register_array_size_(0),
@@ -661,7 +662,8 @@
     }
   }
 
-  if (is_global && !interpreted) {
+  DCHECK_NE(0, regexp->GetFlags() & JSRegExp::kGlobal);
+  if (!interpreted) {
     register_array_size_ =
         Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
     max_matches_ = register_array_size_ / registers_per_match_;
@@ -690,6 +692,16 @@
   last_match[1] = 0;
 }
 
+int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) {
+  if ((regexp_->GetFlags() & JSRegExp::kUnicode) != 0 &&
+      last_index + 1 < subject_->length() &&
+      unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
+      unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
+    // Advance over the surrogate pair.
+    return last_index + 2;
+  }
+  return last_index + 1;
+}
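
AdvanceZeroLength is the /u half of ES2015 21.2.5.2.3 AdvanceStringIndex:
after a zero-length match, resume after a whole code point rather than
between the halves of a surrogate pair. A standalone sketch over a plain
array (hypothetical helper, not a V8 API):

    #include <cstdint>

    int AdvanceStringIndex(const uint16_t* s, int length, int index,
                           bool unicode) {
      if (unicode && index + 1 < length &&
          s[index] >= 0xD800 && s[index] <= 0xDBFF &&          // lead
          s[index + 1] >= 0xDC00 && s[index + 1] <= 0xDFFF) {  // trail
        return index + 2;  // skip the whole surrogate pair
      }
      return index + 1;
    }
    // E.g. with s = {'a', 0xD83D, 0xDE00, 'b'} a zero-length match at
    // index 1 resumes at index 3, after the astral character.
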
 
 // -------------------------------------------------------------------
 // Implementation of the Irregexp regular expression engine.
@@ -945,7 +957,7 @@
 class RegExpCompiler {
  public:
   RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
-                 bool ignore_case, bool is_one_byte);
+                 JSRegExp::Flags flags, bool is_one_byte);
 
   int AllocateRegister() {
     if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -955,6 +967,22 @@
     return next_register_++;
   }
 
+  // Lookarounds to match lone surrogates for unicode character class matches
+  // are never nested. We can therefore reuse registers.
+  int UnicodeLookaroundStackRegister() {
+    if (unicode_lookaround_stack_register_ == kNoRegister) {
+      unicode_lookaround_stack_register_ = AllocateRegister();
+    }
+    return unicode_lookaround_stack_register_;
+  }
+
+  int UnicodeLookaroundPositionRegister() {
+    if (unicode_lookaround_position_register_ == kNoRegister) {
+      unicode_lookaround_position_register_ = AllocateRegister();
+    }
+    return unicode_lookaround_position_register_;
+  }
+
   RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
                                            RegExpNode* start,
                                            int capture_count,
@@ -981,7 +1009,8 @@
 
   void SetRegExpTooBig() { reg_exp_too_big_ = true; }
 
-  inline bool ignore_case() { return ignore_case_; }
+  inline bool ignore_case() { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
+  inline bool unicode() { return (flags_ & JSRegExp::kUnicode) != 0; }
   inline bool one_byte() { return one_byte_; }
   inline bool optimize() { return optimize_; }
   inline void set_optimize(bool value) { optimize_ = value; }
@@ -1006,10 +1035,12 @@
  private:
   EndNode* accept_;
   int next_register_;
+  int unicode_lookaround_stack_register_;
+  int unicode_lookaround_position_register_;
   List<RegExpNode*>* work_list_;
   int recursion_depth_;
   RegExpMacroAssembler* macro_assembler_;
-  bool ignore_case_;
+  JSRegExp::Flags flags_;
   bool one_byte_;
   bool reg_exp_too_big_;
   bool limiting_recursion_;
@@ -1041,11 +1072,13 @@
 // Attempts to compile the regexp using an Irregexp code generator.  Returns
 // a fixed array or a null handle depending on whether it succeeded.
 RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
-                               bool ignore_case, bool one_byte)
+                               JSRegExp::Flags flags, bool one_byte)
     : next_register_(2 * (capture_count + 1)),
+      unicode_lookaround_stack_register_(kNoRegister),
+      unicode_lookaround_position_register_(kNoRegister),
       work_list_(NULL),
       recursion_depth_(0),
-      ignore_case_(ignore_case),
+      flags_(flags),
       one_byte_(one_byte),
       reg_exp_too_big_(false),
       limiting_recursion_(false),
@@ -1941,15 +1974,13 @@
 // know that the character is in the range of min_char to max_char inclusive.
 // Either label can be NULL indicating backtracking.  Either label can also be
 // equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm,
-                             ZoneList<int>* ranges,
-                             int start_index,
-                             int end_index,
-                             uc16 min_char,
-                             uc16 max_char,
-                             Label* fall_through,
-                             Label* even_label,
-                             Label* odd_label) {
+static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+                             int start_index, int end_index, uc32 min_char,
+                             uc32 max_char, Label* fall_through,
+                             Label* even_label, Label* odd_label) {
+  DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
+  DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
+
   int first = ranges->at(start_index);
   int last = ranges->at(end_index) - 1;
 
@@ -2098,9 +2129,7 @@
                           Label* on_failure, int cp_offset, bool check_offset,
                           bool preloaded, Zone* zone) {
   ZoneList<CharacterRange>* ranges = cc->ranges(zone);
-  if (!CharacterRange::IsCanonical(ranges)) {
-    CharacterRange::Canonicalize(ranges);
-  }
+  CharacterRange::Canonicalize(ranges);
 
   int max_char;
   if (one_byte) {
@@ -2142,23 +2171,14 @@
     }
     return;
   }
-  if (last_valid_range == 0 &&
-      !cc->is_negated() &&
-      ranges->at(0).IsEverything(max_char)) {
-    // This is a common case hit by non-anchored expressions.
-    if (check_offset) {
-      macro_assembler->CheckPosition(cp_offset, on_failure);
-    }
-    return;
-  }
 
   if (!preloaded) {
     macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
   }
 
   if (cc->is_standard(zone) &&
-        macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
-                                                    on_failure)) {
+      macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
+                                                  on_failure)) {
       return;
   }
 
@@ -2470,12 +2490,14 @@
   } else {
     // For 2-character preloads in one-byte mode or 1-character preloads in
     // two-byte mode we also use a 16 bit load with zero extend.
+    static const uint32_t kTwoByteMask = 0xffff;
+    static const uint32_t kFourByteMask = 0xffffffff;
     if (details->characters() == 2 && compiler->one_byte()) {
-      if ((mask & 0xffff) == 0xffff) need_mask = false;
+      if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
     } else if (details->characters() == 1 && !compiler->one_byte()) {
-      if ((mask & 0xffff) == 0xffff) need_mask = false;
+      if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
     } else {
-      if (mask == 0xffffffff) need_mask = false;
+      if (mask == kFourByteMask) need_mask = false;
     }
   }
 
@@ -2798,9 +2820,7 @@
       DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
       RegExpCharacterClass* cc = elm.char_class();
       ZoneList<CharacterRange>* ranges = cc->ranges(zone());
-      if (!CharacterRange::IsCanonical(ranges)) {
-        CharacterRange::Canonicalize(ranges);
-      }
+      CharacterRange::Canonicalize(ranges);
       // Now they are in order so we only need to look at the first.
       int range_count = ranges->length();
       if (cc->is_negated()) {
@@ -3289,6 +3309,36 @@
 }
 
 
+TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
+                                             ZoneList<CharacterRange>* ranges,
+                                             bool read_backward,
+                                             RegExpNode* on_success) {
+  DCHECK_NOT_NULL(ranges);
+  ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
+  elms->Add(
+      TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, false)),
+      zone);
+  return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+
+TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+                                           CharacterRange trail,
+                                           bool read_backward,
+                                           RegExpNode* on_success) {
+  ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
+  ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
+  ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
+  elms->Add(TextElement::CharClass(
+                new (zone) RegExpCharacterClass(lead_ranges, false)),
+            zone);
+  elms->Add(TextElement::CharClass(
+                new (zone) RegExpCharacterClass(trail_ranges, false)),
+            zone);
+  return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+
 // This generates the code to match a text node.  A text node can contain
 // straight character sequences (possibly to be matched in a case-independent
 // way) and character classes.  For efficiency we do not do this in a single
@@ -3385,10 +3435,7 @@
       // independent case and it slows us down if we don't know that.
       if (cc->is_standard(zone())) continue;
       ZoneList<CharacterRange>* ranges = cc->ranges(zone());
-      int range_count = ranges->length();
-      for (int j = 0; j < range_count; j++) {
-        ranges->at(j).AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
-      }
+      CharacterRange::AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
     }
   }
 }
@@ -3405,9 +3452,7 @@
   if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
   RegExpCharacterClass* node = elm.char_class();
   ZoneList<CharacterRange>* ranges = node->ranges(zone());
-  if (!CharacterRange::IsCanonical(ranges)) {
-    CharacterRange::Canonicalize(ranges);
-  }
+  CharacterRange::Canonicalize(ranges);
   if (node->is_negated()) {
     return ranges->length() == 0 ? on_success() : NULL;
   }
@@ -3554,27 +3599,29 @@
 };
 
 
+static const uc32 kRangeEndMarker = 0x110000;
+
 // The '2' variant has an inclusive from and an exclusive to, so e.g.
 // {'0', '9' + 1} encodes the single inclusive range '0'..'9'.
 // This covers \s as defined in ECMA-262 5.1, 15.10.2.12, which includes
 // WhiteSpace (7.2) and LineTerminator (7.3) values.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
-    0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
-    0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
-    0xFEFF, 0xFF00, 0x10000 };
+static const int kSpaceRanges[] = {
+    '\t',   '\r' + 1, ' ',    ' ' + 1, 0x00A0, 0x00A1, 0x1680,         0x1681,
+    0x180E, 0x180F,   0x2000, 0x200B,  0x2028, 0x202A, 0x202F,         0x2030,
+    0x205F, 0x2060,   0x3000, 0x3001,  0xFEFF, 0xFF00, kRangeEndMarker};
 static const int kSpaceRangeCount = arraysize(kSpaceRanges);
 
 static const int kWordRanges[] = {
-    '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
+    '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
 static const int kWordRangeCount = arraysize(kWordRanges);
-static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
+static const int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
 static const int kDigitRangeCount = arraysize(kDigitRanges);
-static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
+static const int kSurrogateRanges[] = {
+    kLeadSurrogateStart, kLeadSurrogateStart + 1, kRangeEndMarker};
 static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
-static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
-    0x2028, 0x202A, 0x10000 };
+static const int kLineTerminatorRanges[] = {
+    0x000A, 0x000B, 0x000D, 0x000E, 0x2028, 0x202A, kRangeEndMarker};
 static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
 
-
 void BoyerMoorePositionInfo::Set(int character) {
   SetInterval(Interval(character, character));
 }
@@ -3916,6 +3963,11 @@
 void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   int choice_count = alternatives_->length();
 
+  if (choice_count == 1 && alternatives_->at(0).guards() == NULL) {
+    alternatives_->at(0).node()->Emit(compiler, trace);
+    return;
+  }
+
   AssertGuardsMentionRegisters(trace);
 
   LimitResult limit_result = LimitVersions(compiler, trace);
@@ -4349,14 +4401,19 @@
 
   DCHECK_EQ(start_reg_ + 1, end_reg_);
   if (compiler->ignore_case()) {
-    assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
-                                               trace->backtrack());
+    assembler->CheckNotBackReferenceIgnoreCase(
+        start_reg_, read_backward(), compiler->unicode(), trace->backtrack());
   } else {
     assembler->CheckNotBackReference(start_reg_, read_backward(),
                                      trace->backtrack());
   }
   // We are going to advance backward, so we may end up at the start.
   if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
+
+  // Check that the back reference does not end inside a surrogate pair.
+  if (compiler->unicode() && !compiler->one_byte()) {
+    assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
+  }
   on_success()->Emit(compiler, trace);
 }
 
@@ -4732,8 +4789,8 @@
 static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
                                  const int* special_class,
                                  int length) {
-  length--;  // Remove final 0x10000.
-  DCHECK(special_class[length] == 0x10000);
+  length--;  // Remove final marker.
+  DCHECK(special_class[length] == kRangeEndMarker);
   DCHECK(ranges->length() != 0);
   DCHECK(length != 0);
   DCHECK(special_class[0] != 0);
@@ -4753,7 +4810,7 @@
       return false;
     }
   }
-  if (range.to() != 0xffff) {
+  if (range.to() != String::kMaxCodePoint) {
     return false;
   }
   return true;
@@ -4763,8 +4820,8 @@
 static bool CompareRanges(ZoneList<CharacterRange>* ranges,
                           const int* special_class,
                           int length) {
-  length--;  // Remove final 0x10000.
-  DCHECK(special_class[length] == 0x10000);
+  length--;  // Remove final marker.
+  DCHECK(special_class[length] == kRangeEndMarker);
   if (ranges->length() * 2 != length) {
     return false;
   }
@@ -4820,10 +4877,303 @@
 }
 
 
+UnicodeRangeSplitter::UnicodeRangeSplitter(Zone* zone,
+                                           ZoneList<CharacterRange>* base)
+    : zone_(zone),
+      table_(zone),
+      bmp_(nullptr),
+      lead_surrogates_(nullptr),
+      trail_surrogates_(nullptr),
+      non_bmp_(nullptr) {
+  // The unicode range splitter categorizes given character ranges into:
+  // - Code points from the BMP representable by one code unit.
+  // - Code points outside the BMP that need to be split into surrogate pairs.
+  // - Lone lead surrogates.
+  // - Lone trail surrogates.
+  // Lone surrogates are valid code points, even though they represent no
+  // actual characters. They require special matching to make sure we do not
+  // split surrogate pairs. We use the dispatch table to accomplish this: the
+  // overlay ranges split the base ranges up in the table, and the Call
+  // callback filters and collects the resulting ranges per category.
+  for (int i = 0; i < base->length(); i++) {
+    table_.AddRange(base->at(i), kBase, zone_);
+  }
+  // Add overlay ranges.
+  table_.AddRange(CharacterRange::Range(0, kLeadSurrogateStart - 1),
+                  kBmpCodePoints, zone_);
+  table_.AddRange(CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd),
+                  kLeadSurrogates, zone_);
+  table_.AddRange(
+      CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+      kTrailSurrogates, zone_);
+  table_.AddRange(
+      CharacterRange::Range(kTrailSurrogateEnd + 1, kNonBmpStart - 1),
+      kBmpCodePoints, zone_);
+  table_.AddRange(CharacterRange::Range(kNonBmpStart, kNonBmpEnd),
+                  kNonBmpCodePoints, zone_);
+  table_.ForEach(this);
+}
+
+
+void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
+  OutSet* outset = entry.out_set();
+  if (!outset->Get(kBase)) return;
+  ZoneList<CharacterRange>** target = NULL;
+  if (outset->Get(kBmpCodePoints)) {
+    target = &bmp_;
+  } else if (outset->Get(kLeadSurrogates)) {
+    target = &lead_surrogates_;
+  } else if (outset->Get(kTrailSurrogates)) {
+    target = &trail_surrogates_;
+  } else {
+    DCHECK(outset->Get(kNonBmpCodePoints));
+    target = &non_bmp_;
+  }
+  if (*target == NULL) *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
+  (*target)->Add(CharacterRange::Range(entry.from(), entry.to()), zone_);
+}
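
Each sub-range ends up in exactly one bucket. A compact per-code-point sketch
of the classification the overlay ranges implement, using the same boundaries
(lead surrogates 0xD800..0xDBFF, trail surrogates 0xDC00..0xDFFF, non-BMP
0x10000..0x10FFFF):

    #include <cstdint>

    enum class Bucket { kBmp, kLeadSurrogate, kTrailSurrogate, kNonBmp };

    Bucket Classify(uint32_t cp) {
      if (cp >= 0x10000) return Bucket::kNonBmp;  // needs a surrogate pair
      if (cp >= 0xDC00 && cp <= 0xDFFF) return Bucket::kTrailSurrogate;
      if (cp >= 0xD800 && cp <= 0xDBFF) return Bucket::kLeadSurrogate;
      return Bucket::kBmp;  // one code unit suffices
    }
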
+
+
+void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
+                      RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
+  ZoneList<CharacterRange>* bmp = splitter->bmp();
+  if (bmp == nullptr) return;
+  result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
+      compiler->zone(), bmp, compiler->read_backward(), on_success)));
+}
+
+
+void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
+                             RegExpNode* on_success,
+                             UnicodeRangeSplitter* splitter) {
+  ZoneList<CharacterRange>* non_bmp = splitter->non_bmp();
+  if (non_bmp == nullptr) return;
+  DCHECK(compiler->unicode());
+  DCHECK(!compiler->one_byte());
+  Zone* zone = compiler->zone();
+  CharacterRange::Canonicalize(non_bmp);
+  for (int i = 0; i < non_bmp->length(); i++) {
+    // Match surrogate pair.
+    // E.g. [\u10005-\u11005] becomes
+    //      \ud800[\udc05-\udfff]|
+    //      [\ud801-\ud803][\udc00-\udfff]|
+    //      \ud804[\udc00-\udc05]
+    uc32 from = non_bmp->at(i).from();
+    uc32 to = non_bmp->at(i).to();
+    uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
+    uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
+    uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
+    uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
+    if (from_l == to_l) {
+      // The lead surrogate is the same.
+      result->AddAlternative(
+          GuardedAlternative(TextNode::CreateForSurrogatePair(
+              zone, CharacterRange::Singleton(from_l),
+              CharacterRange::Range(from_t, to_t), compiler->read_backward(),
+              on_success)));
+    } else {
+      if (from_t != kTrailSurrogateStart) {
+        // Add [from_l][from_t-\udfff]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Singleton(from_l),
+                CharacterRange::Range(from_t, kTrailSurrogateEnd),
+                compiler->read_backward(), on_success)));
+        from_l++;
+      }
+      if (to_t != kTrailSurrogateEnd) {
+        // Add [to_l][\udc00-to_t]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Singleton(to_l),
+                CharacterRange::Range(kTrailSurrogateStart, to_t),
+                compiler->read_backward(), on_success)));
+        to_l--;
+      }
+      if (from_l <= to_l) {
+        // Add [from_l-to_l][\udc00-\udfff]
+        result->AddAlternative(
+            GuardedAlternative(TextNode::CreateForSurrogatePair(
+                zone, CharacterRange::Range(from_l, to_l),
+                CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+                compiler->read_backward(), on_success)));
+      }
+    }
+  }
+}
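
The decomposition in the comment above checks out against the standard UTF-16
encoding, which is assumed here to be what unibrow::Utf16::LeadSurrogate and
TrailSurrogate compute:

    #include <cstdint>

    uint16_t LeadSurrogate(uint32_t cp) {
      return 0xD800 + ((cp - 0x10000) >> 10);
    }
    uint16_t TrailSurrogate(uint32_t cp) {
      return 0xDC00 + ((cp - 0x10000) & 0x3FF);
    }
    // LeadSurrogate(0x10005) == 0xD800, TrailSurrogate(0x10005) == 0xDC05
    // LeadSurrogate(0x11005) == 0xD804, TrailSurrogate(0x11005) == 0xDC05
    // which yields exactly the three alternatives in the comment above.
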
+
+
+RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
+    RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
+    ZoneList<CharacterRange>* match, RegExpNode* on_success,
+    bool read_backward) {
+  Zone* zone = compiler->zone();
+  RegExpNode* match_node = TextNode::CreateForCharacterRanges(
+      zone, match, read_backward, on_success);
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  RegExpLookaround::Builder lookaround(false, match_node, stack_register,
+                                       position_register);
+  RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+      zone, lookbehind, !read_backward, lookaround.on_match_success());
+  return lookaround.ForMatch(negative_match);
+}
+
+
+RegExpNode* MatchAndNegativeLookaroundInReadDirection(
+    RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
+    ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
+    bool read_backward) {
+  Zone* zone = compiler->zone();
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  RegExpLookaround::Builder lookaround(false, on_success, stack_register,
+                                       position_register);
+  RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+      zone, lookahead, read_backward, lookaround.on_match_success());
+  return TextNode::CreateForCharacterRanges(
+      zone, match, read_backward, lookaround.ForMatch(negative_match));
+}
+
+
+void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+                           RegExpNode* on_success,
+                           UnicodeRangeSplitter* splitter) {
+  ZoneList<CharacterRange>* lead_surrogates = splitter->lead_surrogates();
+  if (lead_surrogates == nullptr) return;
+  Zone* zone = compiler->zone();
+  // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
+  ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+  RegExpNode* match;
+  if (compiler->read_backward()) {
+    // Reading backward. Assert that reading forward, there is no trail
+    // surrogate, and then backward match the lead surrogate.
+    match = NegativeLookaroundAgainstReadDirectionAndMatch(
+        compiler, trail_surrogates, lead_surrogates, on_success, true);
+  } else {
+    // Reading forward. Forward match the lead surrogate and assert that
+    // no trail surrogate follows.
+    match = MatchAndNegativeLookaroundInReadDirection(
+        compiler, lead_surrogates, trail_surrogates, on_success, false);
+  }
+  result->AddAlternative(GuardedAlternative(match));
+}
+
+
+void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+                            RegExpNode* on_success,
+                            UnicodeRangeSplitter* splitter) {
+  ZoneList<CharacterRange>* trail_surrogates = splitter->trail_surrogates();
+  if (trail_surrogates == nullptr) return;
+  Zone* zone = compiler->zone();
+  // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01
+  ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+
+  RegExpNode* match;
+  if (compiler->read_backward()) {
+    // Reading backward. Backward match the trail surrogate and assert that no
+    // lead surrogate precedes it.
+    match = MatchAndNegativeLookaroundInReadDirection(
+        compiler, trail_surrogates, lead_surrogates, on_success, true);
+  } else {
+    // Reading forward. Assert that reading backward, there is no lead
+    // surrogate, and then forward match the trail surrogate.
+    match = NegativeLookaroundAgainstReadDirectionAndMatch(
+        compiler, lead_surrogates, trail_surrogates, on_success, false);
+  }
+  result->AddAlternative(GuardedAlternative(match));
+}
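
The lookarounds in these two helpers encode the /u-mode rule that a lone
surrogate in the pattern must not match half of a well-formed pair in the
subject: /\ud800/u has to fail against "\ud800\udc00", because that lead
surrogate belongs to a pair, and the trailing (?![\udc00-\udfff]) assertion
is what rejects it. The lookbehind variant handles lone trail surrogates
symmetrically.
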
+
+RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
+                              RegExpNode* on_success) {
+  // This implements ES2015 21.2.5.2.3, AdvanceStringIndex.
+  DCHECK(!compiler->read_backward());
+  Zone* zone = compiler->zone();
+  // Advance any character. If the character happens to be a lead surrogate and
+  // we advanced into the middle of a surrogate pair, it will work out, as
+  // nothing will match from there. We will have to advance again, consuming
+  // the associated trail surrogate.
+  ZoneList<CharacterRange>* range = CharacterRange::List(
+      zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+  return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
+}
+
+
+void AddUnicodeCaseEquivalents(RegExpCompiler* compiler,
+                               ZoneList<CharacterRange>* ranges) {
+#ifdef V8_I18N_SUPPORT
+  // Use ICU to compute the case fold closure over the ranges.
+  DCHECK(compiler->unicode());
+  DCHECK(compiler->ignore_case());
+  USet* set = uset_openEmpty();
+  for (int i = 0; i < ranges->length(); i++) {
+    uset_addRange(set, ranges->at(i).from(), ranges->at(i).to());
+  }
+  ranges->Clear();
+  uset_closeOver(set, USET_CASE_INSENSITIVE);
+  // Full case mappings map single characters to multiple characters.
+  // Those are represented as strings in the set. Remove them so that
+  // we end up with only simple and common case mappings.
+  uset_removeAllStrings(set);
+  int item_count = uset_getItemCount(set);
+  int item_result = 0;
+  UErrorCode ec = U_ZERO_ERROR;
+  Zone* zone = compiler->zone();
+  for (int i = 0; i < item_count; i++) {
+    uc32 start = 0;
+    uc32 end = 0;
+    item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
+    ranges->Add(CharacterRange::Range(start, end), zone);
+  }
+  // No errors, and everything we collected was a range (not a string).
+  DCHECK_EQ(U_ZERO_ERROR, ec);
+  DCHECK_EQ(0, item_result);
+  uset_close(set);
+#else
+  // Fallback if ICU is not included.
+  CharacterRange::AddCaseEquivalents(compiler->isolate(), compiler->zone(),
+                                     ranges, compiler->one_byte());
+#endif  // V8_I18N_SUPPORT
+  CharacterRange::Canonicalize(ranges);
+}
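
A concrete illustration of the closure, assuming ICU's standard
case-insensitive closure behavior and the same unicode/uset.h API used above:

    USet* set = uset_openEmpty();
    uset_addRange(set, 'k', 'k');
    uset_closeOver(set, USET_CASE_INSENSITIVE);
    // The set now also contains 'K' and U+212A KELVIN SIGN, so /[k]/iu
    // matches all three. uset_removeAllStrings() would be a no-op here,
    // since 'k' has no multi-character full case mapping.
    uset_close(set);
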
+
+
 RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
                                          RegExpNode* on_success) {
-  return new (compiler->zone())
-      TextNode(this, compiler->read_backward(), on_success);
+  set_.Canonicalize();
+  Zone* zone = compiler->zone();
+  ZoneList<CharacterRange>* ranges = this->ranges(zone);
+  if (compiler->unicode() && compiler->ignore_case()) {
+    AddUnicodeCaseEquivalents(compiler, ranges);
+  }
+  if (compiler->unicode() && !compiler->one_byte()) {
+    if (is_negated()) {
+      ZoneList<CharacterRange>* negated =
+          new (zone) ZoneList<CharacterRange>(2, zone);
+      CharacterRange::Negate(ranges, negated, zone);
+      ranges = negated;
+    }
+    if (ranges->length() == 0) {
+      // No matches possible.
+      return new (zone) EndNode(EndNode::BACKTRACK, zone);
+    }
+    if (standard_type() == '*') {
+      return UnanchoredAdvance(compiler, on_success);
+    } else {
+      ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+      UnicodeRangeSplitter splitter(zone, ranges);
+      AddBmpCharacters(compiler, result, on_success, &splitter);
+      AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
+      AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
+      AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
+      return result;
+    }
+  } else {
+    return new (zone) TextNode(this, compiler->read_backward(), on_success);
+  }
 }
 
 
@@ -5338,6 +5688,47 @@
 }
 
 
+RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
+                                   int stack_pointer_register,
+                                   int position_register,
+                                   int capture_register_count,
+                                   int capture_register_start)
+    : is_positive_(is_positive),
+      on_success_(on_success),
+      stack_pointer_register_(stack_pointer_register),
+      position_register_(position_register) {
+  if (is_positive_) {
+    on_match_success_ = ActionNode::PositiveSubmatchSuccess(
+        stack_pointer_register, position_register, capture_register_count,
+        capture_register_start, on_success_);
+  } else {
+    Zone* zone = on_success_->zone();
+    on_match_success_ = new (zone) NegativeSubmatchSuccess(
+        stack_pointer_register, position_register, capture_register_count,
+        capture_register_start, zone);
+  }
+}
+
+
+RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
+  if (is_positive_) {
+    return ActionNode::BeginSubmatch(stack_pointer_register_,
+                                     position_register_, match);
+  } else {
+    Zone* zone = on_success_->zone();
+    // We use a ChoiceNode to represent the negative lookaround. The first
+    // alternative is the negative match. On success, the end node backtracks.
+    // On failure, the second alternative is tried and leads to success.
+    // NegativeLookaheadChoiceNode is a special ChoiceNode that ignores the
+    // first exit when calculating quick checks.
+    ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+        GuardedAlternative(match), GuardedAlternative(on_success_), zone);
+    return ActionNode::BeginSubmatch(stack_pointer_register_,
+                                     position_register_, choice_node);
+  }
+}
+
+
 RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
                                      RegExpNode* on_success) {
   int stack_pointer_register = compiler->AllocateRegister();
@@ -5352,35 +5743,10 @@
   RegExpNode* result;
   bool was_reading_backward = compiler->read_backward();
   compiler->set_read_backward(type() == LOOKBEHIND);
-  if (is_positive()) {
-    result = ActionNode::BeginSubmatch(
-        stack_pointer_register, position_register,
-        body()->ToNode(compiler,
-                       ActionNode::PositiveSubmatchSuccess(
-                           stack_pointer_register, position_register,
-                           register_count, register_start, on_success)));
-  } else {
-    // We use a ChoiceNode for a negative lookahead because it has most of
-    // the characteristics we need.  It has the body of the lookahead as its
-    // first alternative and the expression after the lookahead of the second
-    // alternative.  If the first alternative succeeds then the
-    // NegativeSubmatchSuccess will unwind the stack including everything the
-    // choice node set up and backtrack.  If the first alternative fails then
-    // the second alternative is tried, which is exactly the desired result
-    // for a negative lookahead.  The NegativeLookaheadChoiceNode is a special
-    // ChoiceNode that knows to ignore the first exit when calculating quick
-    // checks.
-    Zone* zone = compiler->zone();
-
-    GuardedAlternative body_alt(
-        body()->ToNode(compiler, new (zone) NegativeSubmatchSuccess(
-                                     stack_pointer_register, position_register,
-                                     register_count, register_start, zone)));
-    ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
-        body_alt, GuardedAlternative(on_success), zone);
-    result = ActionNode::BeginSubmatch(stack_pointer_register,
-                                       position_register, choice_node);
-  }
+  Builder builder(is_positive(), on_success, stack_pointer_register,
+                  position_register, register_count, register_start);
+  RegExpNode* match = body_->ToNode(compiler, builder.on_match_success());
+  result = builder.ForMatch(match);
   compiler->set_read_backward(was_reading_backward);
   return result;
 }
@@ -5428,10 +5794,10 @@
                      ZoneList<CharacterRange>* ranges,
                      Zone* zone) {
   elmc--;
-  DCHECK(elmv[elmc] == 0x10000);
+  DCHECK(elmv[elmc] == kRangeEndMarker);
   for (int i = 0; i < elmc; i += 2) {
     DCHECK(elmv[i] < elmv[i + 1]);
-    ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
+    ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
   }
 }
 
@@ -5441,17 +5807,17 @@
                             ZoneList<CharacterRange>* ranges,
                             Zone* zone) {
   elmc--;
-  DCHECK(elmv[elmc] == 0x10000);
+  DCHECK(elmv[elmc] == kRangeEndMarker);
   DCHECK(elmv[0] != 0x0000);
-  DCHECK(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
+  DCHECK(elmv[elmc - 1] != String::kMaxCodePoint);
   uc16 last = 0x0000;
   for (int i = 0; i < elmc; i += 2) {
     DCHECK(last <= elmv[i] - 1);
     DCHECK(elmv[i] < elmv[i + 1]);
-    ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
+    ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
     last = elmv[i + 1];
   }
-  ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit), zone);
+  ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
 }
 
 
@@ -5508,115 +5874,73 @@
 }
 
 
-class CharacterRangeSplitter {
- public:
-  CharacterRangeSplitter(ZoneList<CharacterRange>** included,
-                         ZoneList<CharacterRange>** excluded,
-                         Zone* zone)
-      : included_(included),
-        excluded_(excluded),
-        zone_(zone) { }
-  void Call(uc16 from, DispatchTable::Entry entry);
-
-  static const int kInBase = 0;
-  static const int kInOverlay = 1;
-
- private:
-  ZoneList<CharacterRange>** included_;
-  ZoneList<CharacterRange>** excluded_;
-  Zone* zone_;
-};
-
-
-void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
-  if (!entry.out_set()->Get(kInBase)) return;
-  ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
-    ? included_
-    : excluded_;
-  if (*target == NULL) *target = new(zone_) ZoneList<CharacterRange>(2, zone_);
-  (*target)->Add(CharacterRange(entry.from(), entry.to()), zone_);
-}
-
-
-void CharacterRange::Split(ZoneList<CharacterRange>* base,
-                           Vector<const int> overlay,
-                           ZoneList<CharacterRange>** included,
-                           ZoneList<CharacterRange>** excluded,
-                           Zone* zone) {
-  DCHECK_NULL(*included);
-  DCHECK_NULL(*excluded);
-  DispatchTable table(zone);
-  for (int i = 0; i < base->length(); i++)
-    table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
-  for (int i = 0; i < overlay.length(); i += 2) {
-    table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
-                   CharacterRangeSplitter::kInOverlay, zone);
-  }
-  CharacterRangeSplitter callback(included, excluded, zone);
-  table.ForEach(&callback);
-}
-
-
 void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
                                         ZoneList<CharacterRange>* ranges,
                                         bool is_one_byte) {
-  uc16 bottom = from();
-  uc16 top = to();
-  if (is_one_byte && !RangeContainsLatin1Equivalents(*this)) {
-    if (bottom > String::kMaxOneByteCharCode) return;
-    if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
-  }
-  unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-  if (top == bottom) {
-    // If this is a singleton we just expand the one character.
-    int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
-    for (int i = 0; i < length; i++) {
-      uc32 chr = chars[i];
-      if (chr != bottom) {
-        ranges->Add(CharacterRange::Singleton(chars[i]), zone);
-      }
+  int range_count = ranges->length();
+  for (int i = 0; i < range_count; i++) {
+    CharacterRange range = ranges->at(i);
+    uc32 bottom = range.from();
+    if (bottom > String::kMaxUtf16CodeUnit) return;
+    uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+    // Nothing to be done for surrogates.
+    if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) return;
+    if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+      if (bottom > String::kMaxOneByteCharCode) return;
+      if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
     }
-  } else {
-    // If this is a range we expand the characters block by block,
-    // expanding contiguous subranges (blocks) one at a time.
-    // The approach is as follows.  For a given start character we
-    // look up the remainder of the block that contains it (represented
-    // by the end point), for instance we find 'z' if the character
-    // is 'c'.  A block is characterized by the property
-    // that all characters uncanonicalize in the same way, except that
-    // each entry in the result is incremented by the distance from the first
-    // element.  So a-z is a block because 'a' uncanonicalizes to ['a', 'A'] and
-    // the k'th letter uncanonicalizes to ['a' + k, 'A' + k].
-    // Once we've found the end point we look up its uncanonicalization
-    // and produce a range for each element.  For instance for [c-f]
-    // we look up ['z', 'Z'] and produce [c-f] and [C-F].  We then only
-    // add a range if it is not already contained in the input, so [c-f]
-    // will be skipped but [C-F] will be added.  If this range is not
-    // completely contained in a block we do this for all the blocks
-    // covered by the range (handling characters that is not in a block
-    // as a "singleton block").
-    unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-    int pos = bottom;
-    while (pos <= top) {
-      int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
-      uc16 block_end;
-      if (length == 0) {
-        block_end = pos;
-      } else {
-        DCHECK_EQ(1, length);
-        block_end = range[0];
-      }
-      int end = (block_end > top) ? top : block_end;
-      length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
+    unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+    if (top == bottom) {
+      // If this is a singleton we just expand the one character.
+      int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
       for (int i = 0; i < length; i++) {
-        uc32 c = range[i];
-        uc16 range_from = c - (block_end - pos);
-        uc16 range_to = c - (block_end - end);
-        if (!(bottom <= range_from && range_to <= top)) {
-          ranges->Add(CharacterRange(range_from, range_to), zone);
+        uc32 chr = chars[i];
+        if (chr != bottom) {
+          ranges->Add(CharacterRange::Singleton(chars[i]), zone);
         }
       }
-      pos = end + 1;
+    } else {
+      // If this is a range we expand the characters block by block, expanding
+      // contiguous subranges (blocks) one at a time.  The approach is as
+      // follows.  For a given start character we look up the remainder of the
+      // block that contains it (represented by the end point), for instance we
+      // find 'z' if the character is 'c'.  A block is characterized by the
+      // property that all characters uncanonicalize in the same way, except
+      // that each entry in the result is incremented by the distance from the
+      // first element.  So a-z is a block because 'a' uncanonicalizes to ['a',
+      // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k].  Once
+      // we've found the end point we look up its uncanonicalization and
+      // produce a range for each element.  For instance for [c-f] we look up
+      // ['z', 'Z'] and produce [c-f] and [C-F].  We then only add a range if
+      // it is not already contained in the input, so [c-f] will be skipped but
+      // [C-F] will be added.  If this range is not completely contained in a
+      // block we do this for all the blocks covered by the range (handling
+      // characters that are not in a block as a "singleton block").
+      unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+      int pos = bottom;
+      while (pos <= top) {
+        int length =
+            isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
+        uc32 block_end;
+        if (length == 0) {
+          block_end = pos;
+        } else {
+          DCHECK_EQ(1, length);
+          block_end = equivalents[0];
+        }
+        int end = (block_end > top) ? top : block_end;
+        length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
+                                                         equivalents);
+        for (int i = 0; i < length; i++) {
+          uc32 c = equivalents[i];
+          uc32 range_from = c - (block_end - pos);
+          uc32 range_to = c - (block_end - end);
+          if (!(bottom <= range_from && range_to <= top)) {
+            ranges->Add(CharacterRange::Range(range_from, range_to), zone);
+          }
+        }
+        pos = end + 1;
+      }
     }
   }
 }
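
A worked instance of the block arithmetic above, using the ['a'-'z'] block
and the input range [c-f]: block_end is 'z', and uncanonicalize('z') yields
{'z', 'Z'}. For the equivalent 'Z', range_from = 'Z' - ('z' - 'c') = 'C' and
range_to = 'Z' - ('z' - 'f') = 'F', so [C-F] is added. The 'z' entry
reproduces [c-f] itself, which is already contained in the input and
therefore skipped.
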
@@ -5672,8 +5996,8 @@
   // list[0..count] for the result. Returns the number of resulting
   // canonicalized ranges. Inserting a range may collapse existing ranges into
   // fewer ranges, so the return value can be anything in the range 1..count+1.
-  uc16 from = insert.from();
-  uc16 to = insert.to();
+  uc32 from = insert.from();
+  uc32 to = insert.to();
   int start_pos = 0;
   int end_pos = count;
   for (int i = count - 1; i >= 0; i--) {
@@ -5706,7 +6030,7 @@
     CharacterRange to_replace = list->at(start_pos);
     int new_from = Min(to_replace.from(), from);
     int new_to = Max(to_replace.to(), to);
-    list->at(start_pos) = CharacterRange(new_from, new_to);
+    list->at(start_pos) = CharacterRange::Range(new_from, new_to);
     return count;
   }
   // Replace a number of existing ranges from start_pos to end_pos - 1.
@@ -5717,7 +6041,7 @@
   if (end_pos < count) {
     MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
   }
-  list->at(start_pos) = CharacterRange(new_from, new_to);
+  list->at(start_pos) = CharacterRange::Range(new_from, new_to);
   return count - (end_pos - start_pos) + 1;
 }
 
@@ -5773,20 +6097,20 @@
   DCHECK(CharacterRange::IsCanonical(ranges));
   DCHECK_EQ(0, negated_ranges->length());
   int range_count = ranges->length();
-  uc16 from = 0;
+  uc32 from = 0;
   int i = 0;
   if (range_count > 0 && ranges->at(0).from() == 0) {
-    from = ranges->at(0).to();
+    from = ranges->at(0).to() + 1;
     i = 1;
   }
   while (i < range_count) {
     CharacterRange range = ranges->at(i);
-    negated_ranges->Add(CharacterRange(from + 1, range.from() - 1), zone);
-    from = range.to();
+    negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
+    from = range.to() + 1;
     i++;
   }
-  if (from < String::kMaxUtf16CodeUnit) {
-    negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit),
+  if (from < String::kMaxCodePoint) {
+    negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
                         zone);
   }
 }
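
A compact sketch of the negation logic above over [0, String::kMaxCodePoint], using toy types rather than V8's ZoneList; the input is assumed canonical, i.e. sorted, disjoint, and non-adjacent:

#include <cstdio>
#include <vector>

struct Range { unsigned from, to; };
static const unsigned kMaxCodePoint = 0x10FFFF;

static std::vector<Range> Negate(const std::vector<Range>& ranges) {
  std::vector<Range> out;
  unsigned from = 0;
  size_t i = 0;
  if (!ranges.empty() && ranges[0].from == 0) {  // nothing below the first range
    from = ranges[0].to + 1;
    i = 1;
  }
  for (; i < ranges.size(); i++) {
    out.push_back({from, ranges[i].from - 1});  // gap before this range
    from = ranges[i].to + 1;
  }
  if (from <= kMaxCodePoint) out.push_back({from, kMaxCodePoint});
  return out;
}

int main() {
  for (const Range& r : Negate({{'a', 'z'}}))
    std::printf("[0x%X-0x%X]\n", r.from, r.to);  // [0x0-0x60], [0x7B-0x10FFFF]
}
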
@@ -5838,7 +6162,7 @@
 }
 
 
-const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
+const uc32 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
 
 
 void DispatchTable::AddRange(CharacterRange full_range, int value,
@@ -5866,8 +6190,9 @@
     if (entry->from() < current.from() && entry->to() >= current.from()) {
       // Snap the overlapping range in half around the start point of
       // the range we're adding.
-      CharacterRange left(entry->from(), current.from() - 1);
-      CharacterRange right(current.from(), entry->to());
+      CharacterRange left =
+          CharacterRange::Range(entry->from(), current.from() - 1);
+      CharacterRange right = CharacterRange::Range(current.from(), entry->to());
       // The left part of the overlapping range doesn't overlap.
       // Truncate the whole entry to be just the left part.
       entry->set_to(left.to());
@@ -5919,10 +6244,6 @@
       // we're adding so we can just update it and move the start point
       // of the range we're adding just past it.
       entry->AddValue(value, zone);
-      // Bail out if the last interval ended at 0xFFFF since otherwise
-      // adding 1 will wrap around to 0.
-      if (entry->to() == String::kMaxUtf16CodeUnit)
-        break;
       DCHECK(entry->to() + 1 > current.from());
       current.set_from(entry->to() + 1);
     } else {
@@ -5940,7 +6261,7 @@
 }
 
 
-OutSet* DispatchTable::Get(uc16 value) {
+OutSet* DispatchTable::Get(uc32 value) {
   ZoneSplayTree<Config>::Locator loc;
   if (!tree()->FindGreatestLessThan(value, &loc))
     return empty();
@@ -5990,7 +6311,7 @@
 
 
 void Analysis::VisitText(TextNode* that) {
-  if (ignore_case_) {
+  if (ignore_case()) {
     that->MakeCaseIndependent(isolate(), is_one_byte_);
   }
   EnsureAnalyzed(that->on_success());
@@ -6173,8 +6494,7 @@
 
 
 void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
-  CharacterRange range(from, entry.to());
-  constructor_->AddRange(range);
+  constructor_->AddRange(CharacterRange::Range(from, entry.to()));
 }
 
 
@@ -6212,16 +6532,16 @@
   for (int i = 0; i < ranges->length(); i++) {
     CharacterRange range = ranges->at(i);
     if (last < range.from())
-      AddRange(CharacterRange(last, range.from() - 1));
+      AddRange(CharacterRange::Range(last, range.from() - 1));
     if (range.to() >= last) {
-      if (range.to() == String::kMaxUtf16CodeUnit) {
+      if (range.to() == String::kMaxCodePoint) {
         return;
       } else {
         last = range.to() + 1;
       }
     }
   }
-  AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
+  AddRange(CharacterRange::Range(last, String::kMaxCodePoint));
 }
 
 
@@ -6230,7 +6550,7 @@
   switch (elm.text_type()) {
     case TextElement::ATOM: {
       uc16 c = elm.atom()->data()[0];
-      AddRange(CharacterRange(c, c));
+      AddRange(CharacterRange::Range(c, c));
       break;
     }
     case TextElement::CHAR_CLASS: {
@@ -6257,14 +6577,48 @@
 }
 
 
+RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
+                                              RegExpNode* on_success) {
+  // If the regexp matching starts within a surrogate pair, step back
+  // to the lead surrogate and start matching from there.
+  DCHECK(!compiler->read_backward());
+  Zone* zone = compiler->zone();
+  ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+  ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+      zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+  ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
+
+  int stack_register = compiler->UnicodeLookaroundStackRegister();
+  int position_register = compiler->UnicodeLookaroundPositionRegister();
+  RegExpNode* step_back = TextNode::CreateForCharacterRanges(
+      zone, lead_surrogates, true, on_success);
+  RegExpLookaround::Builder builder(true, step_back, stack_register,
+                                    position_register);
+  RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
+      zone, trail_surrogates, false, builder.on_match_success());
+
+  optional_step_back->AddAlternative(
+      GuardedAlternative(builder.ForMatch(match_trail)));
+  optional_step_back->AddAlternative(GuardedAlternative(on_success));
+
+  return optional_step_back;
+}
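
The effect of this node graph, reduced to a straight-line toy form (hypothetical helpers over a raw UTF-16 buffer, not the lookaround machinery itself): if matching would start on the trail surrogate of a pair, back up one position so the pair is consumed as a whole.

#include <cstdint>
#include <cstdio>

static bool IsLead(uint16_t c) { return c >= 0xD800 && c <= 0xDBFF; }
static bool IsTrail(uint16_t c) { return c >= 0xDC00 && c <= 0xDFFF; }

// Returns the adjusted start position for a match attempt.
static int AdjustStart(const uint16_t* subject, int length, int start) {
  if (start > 0 && start < length && IsTrail(subject[start]) &&
      IsLead(subject[start - 1])) {
    return start - 1;  // step back to the lead surrogate
  }
  return start;
}

int main() {
  const uint16_t s[] = {'x', 0xD83D, 0xDE00, 'y'};  // "x<U+1F600>y"
  std::printf("%d\n", AdjustStart(s, 4, 2));  // prints 1
}
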
+
+
 RegExpEngine::CompilationResult RegExpEngine::Compile(
-    Isolate* isolate, Zone* zone, RegExpCompileData* data, bool ignore_case,
-    bool is_global, bool is_multiline, bool is_sticky, Handle<String> pattern,
+    Isolate* isolate, Zone* zone, RegExpCompileData* data,
+    JSRegExp::Flags flags, Handle<String> pattern,
     Handle<String> sample_subject, bool is_one_byte) {
   if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
     return IrregexpRegExpTooBig(isolate);
   }
-  RegExpCompiler compiler(isolate, zone, data->capture_count, ignore_case,
+  bool ignore_case = flags & JSRegExp::kIgnoreCase;
+  bool is_sticky = flags & JSRegExp::kSticky;
+  bool is_global = flags & JSRegExp::kGlobal;
+  bool is_unicode = flags & JSRegExp::kUnicode;
+  RegExpCompiler compiler(isolate, zone, data->capture_count, flags,
                           is_one_byte);
 
   if (compiler.optimize()) compiler.set_optimize(!TooMuchRegExpCode(pattern));
@@ -6316,11 +6670,13 @@
     if (node != NULL) {
       node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
     }
+  } else if (compiler.unicode() && (is_global || is_sticky)) {
+    node = OptionallyStepBackToLeadSurrogate(&compiler, node);
   }
 
   if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
   data->node = node;
-  Analysis analysis(isolate, ignore_case, is_one_byte);
+  Analysis analysis(isolate, flags, is_one_byte);
   analysis.EnsureAnalyzed(node);
   if (analysis.has_failed()) {
     const char* error_message = analysis.error_message();
@@ -6381,10 +6737,13 @@
   }
 
   if (is_global) {
-    macro_assembler.set_global_mode(
-        (data->tree->min_match() > 0)
-            ? RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK
-            : RegExpMacroAssembler::GLOBAL);
+    RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
+    if (data->tree->min_match() > 0) {
+      mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
+    } else if (is_unicode) {
+      mode = RegExpMacroAssembler::GLOBAL_UNICODE;
+    }
+    macro_assembler.set_global_mode(mode);
   }
 
   return compiler.Assemble(&macro_assembler,
diff --git a/src/regexp/jsregexp.h b/src/regexp/jsregexp.h
index 0ad4b79..e55d650 100644
--- a/src/regexp/jsregexp.h
+++ b/src/regexp/jsregexp.h
@@ -8,6 +8,7 @@
 #include "src/allocation.h"
 #include "src/assembler.h"
 #include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -121,7 +122,6 @@
    public:
     GlobalCache(Handle<JSRegExp> regexp,
                 Handle<String> subject,
-                bool is_global,
                 Isolate* isolate);
 
     INLINE(~GlobalCache());
@@ -137,6 +137,8 @@
     INLINE(bool HasException()) { return num_matches_ < 0; }
 
    private:
+    int AdvanceZeroLength(int last_index);
+
     int num_matches_;
     int max_matches_;
     int current_match_index_;
@@ -265,28 +267,30 @@
   class Entry {
    public:
     Entry() : from_(0), to_(0), out_set_(NULL) { }
-    Entry(uc16 from, uc16 to, OutSet* out_set)
-        : from_(from), to_(to), out_set_(out_set) { }
-    uc16 from() { return from_; }
-    uc16 to() { return to_; }
-    void set_to(uc16 value) { to_ = value; }
+    Entry(uc32 from, uc32 to, OutSet* out_set)
+        : from_(from), to_(to), out_set_(out_set) {
+      DCHECK(from <= to);
+    }
+    uc32 from() { return from_; }
+    uc32 to() { return to_; }
+    void set_to(uc32 value) { to_ = value; }
     void AddValue(int value, Zone* zone) {
       out_set_ = out_set_->Extend(value, zone);
     }
     OutSet* out_set() { return out_set_; }
    private:
-    uc16 from_;
-    uc16 to_;
+    uc32 from_;
+    uc32 to_;
     OutSet* out_set_;
   };
 
   class Config {
    public:
-    typedef uc16 Key;
+    typedef uc32 Key;
     typedef Entry Value;
-    static const uc16 kNoKey;
+    static const uc32 kNoKey;
     static const Entry NoValue() { return Value(); }
-    static inline int Compare(uc16 a, uc16 b) {
+    static inline int Compare(uc32 a, uc32 b) {
       if (a == b)
         return 0;
       else if (a < b)
@@ -297,7 +301,7 @@
   };
 
   void AddRange(CharacterRange range, int value, Zone* zone);
-  OutSet* Get(uc16 value);
+  OutSet* Get(uc32 value);
   void Dump();
 
   template <typename Callback>
@@ -315,6 +319,34 @@
 };
 
 
+// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
+class UnicodeRangeSplitter {
+ public:
+  UnicodeRangeSplitter(Zone* zone, ZoneList<CharacterRange>* base);
+  void Call(uc32 from, DispatchTable::Entry entry);
+
+  ZoneList<CharacterRange>* bmp() { return bmp_; }
+  ZoneList<CharacterRange>* lead_surrogates() { return lead_surrogates_; }
+  ZoneList<CharacterRange>* trail_surrogates() { return trail_surrogates_; }
+  ZoneList<CharacterRange>* non_bmp() const { return non_bmp_; }
+
+ private:
+  static const int kBase = 0;
+  // Separate ranges into the four categories below.
+  static const int kBmpCodePoints = 1;
+  static const int kLeadSurrogates = 2;
+  static const int kTrailSurrogates = 3;
+  static const int kNonBmpCodePoints = 4;
+
+  Zone* zone_;
+  DispatchTable table_;
+  ZoneList<CharacterRange>* bmp_;
+  ZoneList<CharacterRange>* lead_surrogates_;
+  ZoneList<CharacterRange>* trail_surrogates_;
+  ZoneList<CharacterRange>* non_bmp_;
+};
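
A rough sketch of the split this class performs, without the DispatchTable plumbing: intersect one input range with the fixed category ranges (boundaries follow the surrogate constants in regexp-macro-assembler.h; the helper itself is illustrative, not V8's):

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Range { uint32_t from, to; };

static void Split(Range r, void (*emit)(const char*, Range)) {
  const Range kCats[] = {{0x0000, 0xD7FF},      // BMP below surrogates
                         {0xD800, 0xDBFF},      // lead surrogates
                         {0xDC00, 0xDFFF},      // trail surrogates
                         {0xE000, 0xFFFF},      // BMP above surrogates
                         {0x10000, 0x10FFFF}};  // non-BMP
  const char* kNames[] = {"bmp", "lead", "trail", "bmp", "non-bmp"};
  for (int i = 0; i < 5; i++) {
    uint32_t lo = std::max(r.from, kCats[i].from);
    uint32_t hi = std::min(r.to, kCats[i].to);
    if (lo <= hi) emit(kNames[i], Range{lo, hi});
  }
}

int main() {
  Split(Range{0x41, 0x1F600}, [](const char* cat, Range r) {
    std::printf("%s: [0x%X-0x%X]\n", cat, r.from, r.to);
  });
}
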
+
+
 #define FOR_EACH_NODE_TYPE(VISIT)                                    \
   VISIT(End)                                                         \
   VISIT(Action)                                                      \
@@ -690,6 +722,17 @@
         read_backward_(read_backward) {
     elms_->Add(TextElement::CharClass(that), zone());
   }
+  // Create a TextNode for a single character class containing the given ranges.
+  static TextNode* CreateForCharacterRanges(Zone* zone,
+                                            ZoneList<CharacterRange>* ranges,
+                                            bool read_backward,
+                                            RegExpNode* on_success);
+  // Create a TextNode for a surrogate pair, with one character range each
+  // for the lead and the trail surrogate.
+  static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+                                          CharacterRange trail,
+                                          bool read_backward,
+                                          RegExpNode* on_success);
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
   virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
@@ -813,8 +856,7 @@
 class EndNode: public RegExpNode {
  public:
   enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
-  explicit EndNode(Action action, Zone* zone)
-      : RegExpNode(zone), action_(action) { }
+  EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
   virtual int EatsAtLeast(int still_to_find,
@@ -1440,9 +1482,9 @@
 //   +-------+        +------------+
 class Analysis: public NodeVisitor {
  public:
-  Analysis(Isolate* isolate, bool ignore_case, bool is_one_byte)
+  Analysis(Isolate* isolate, JSRegExp::Flags flags, bool is_one_byte)
       : isolate_(isolate),
-        ignore_case_(ignore_case),
+        flags_(flags),
         is_one_byte_(is_one_byte),
         error_message_(NULL) {}
   void EnsureAnalyzed(RegExpNode* node);
@@ -1464,9 +1506,12 @@
 
   Isolate* isolate() const { return isolate_; }
 
+  bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
+  bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
+
  private:
   Isolate* isolate_;
-  bool ignore_case_;
+  JSRegExp::Flags flags_;
   bool is_one_byte_;
   const char* error_message_;
 
@@ -1505,8 +1550,8 @@
   };
 
   static CompilationResult Compile(Isolate* isolate, Zone* zone,
-                                   RegExpCompileData* input, bool ignore_case,
-                                   bool global, bool multiline, bool sticky,
+                                   RegExpCompileData* input,
+                                   JSRegExp::Flags flags,
                                    Handle<String> pattern,
                                    Handle<String> sample_subject,
                                    bool is_one_byte);
diff --git a/src/regexp/mips/regexp-macro-assembler-mips.cc b/src/regexp/mips/regexp-macro-assembler-mips.cc
index 9c59328..6197f45 100644
--- a/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -215,7 +215,7 @@
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ lw(a0, register_location(start_reg));  // Index of start of capture.
   __ lw(a1, register_location(start_reg + 1));  // Index of end of capture.
@@ -310,7 +310,7 @@
     //   a0: Address byte_offset1 - Address captured substring's start.
     //   a1: Address byte_offset2 - Address of current character position.
     //   a2: size_t byte_length - length of capture in bytes(!).
-    //   a3: Isolate* isolate.
+    //   a3: Isolate* isolate or 0 if the unicode flag is set.
 
     // Address of start of capture.
     __ Addu(a0, a0, Operand(end_of_input_address()));
@@ -324,7 +324,14 @@
       __ Subu(a1, a1, Operand(s3));
     }
     // Isolate.
-    __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ mov(a3, zero_reg);
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+    }
 
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -801,9 +808,12 @@
           __ Branch(&exit_label_, eq, current_input_offset(),
                     Operand(zero_reg));
           // Advance current position after a zero-length match.
+          Label advance;
+          __ bind(&advance);
           __ Addu(current_input_offset(),
                   current_input_offset(),
                   Operand((mode_ == UC16) ? 2 : 1));
+          if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
         }
 
         __ Branch(&load_char_start_regexp);
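
In C-level terms, the advance-after-zero-length-match loop emitted above behaves roughly like the following sketch (toy buffer version, not the generated code): under GLOBAL_UNICODE the position is bumped a second time when it would land inside a surrogate pair.

#include <cstdint>
#include <cstdio>

static bool IsLead(uint16_t c) { return c >= 0xD800 && c <= 0xDBFF; }
static bool IsTrail(uint16_t c) { return c >= 0xDC00 && c <= 0xDFFF; }

// Advance past a zero-length match; with the unicode flag, never stop
// between a lead and a trail surrogate.
static int AdvanceZeroLength(const uint16_t* subject, int length, int pos,
                             bool unicode) {
  pos++;
  if (unicode && pos < length && IsTrail(subject[pos]) &&
      IsLead(subject[pos - 1])) {
    pos++;
  }
  return pos;
}

int main() {
  const uint16_t s[] = {0xD83D, 0xDE00, 'a'};
  std::printf("%d\n", AdvanceZeroLength(s, 3, 0, true));  // prints 2
}
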
diff --git a/src/regexp/mips/regexp-macro-assembler-mips.h b/src/regexp/mips/regexp-macro-assembler-mips.h
index 902e220..6dedb1e 100644
--- a/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -37,7 +37,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 5153bd0..bf95a9c 100644
--- a/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -251,7 +251,7 @@
 
 
 void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ ld(a0, register_location(start_reg));  // Index of start of capture.
   __ ld(a1, register_location(start_reg + 1));  // Index of end of capture.
@@ -346,7 +346,7 @@
     //   a0: Address byte_offset1 - Address captured substring's start.
     //   a1: Address byte_offset2 - Address of current character position.
     //   a2: size_t byte_length - length of capture in bytes(!).
-    //   a3: Isolate* isolate.
+    //   a3: Isolate* isolate or 0 if the unicode flag is set.
 
     // Address of start of capture.
     __ Daddu(a0, a0, Operand(end_of_input_address()));
@@ -360,7 +360,14 @@
       __ Dsubu(a1, a1, Operand(s3));
     }
     // Isolate.
-    __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ mov(a3, zero_reg);
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+    }
 
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -664,10 +671,7 @@
         s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
     RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
 
-    if (kMipsAbi == kN64) {
-      // TODO(plind): Should probably alias a4-a7, for clarity.
-      argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
-    }
+    argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
 
     __ MultiPush(argument_registers | registers_to_retain | ra.bit());
     // Set frame pointer in space for it if this is not a direct call
@@ -841,9 +845,12 @@
           __ Branch(&exit_label_, eq, current_input_offset(),
                     Operand(zero_reg));
           // Advance current position after a zero-length match.
+          Label advance;
+          __ bind(&advance);
           __ Daddu(current_input_offset(),
                   current_input_offset(),
                   Operand((mode_ == UC16) ? 2 : 1));
+          if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
         }
 
         __ Branch(&load_char_start_regexp);
diff --git a/src/regexp/mips64/regexp-macro-assembler-mips64.h b/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 9a8ca17..df2c6c5 100644
--- a/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -37,7 +37,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -96,7 +96,6 @@
   void print_regexp_frame_constants();
 
  private:
-#if defined(MIPS_ABI_N64)
   // Offsets from frame_pointer() of function parameters and stored registers.
   static const int kFramePointer = 0;
 
@@ -105,7 +104,7 @@
   static const int kStoredRegisters = kFramePointer;
   // Return address (stored from link register, read into pc on return).
 
-// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
+  // TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
 
   static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
   static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
@@ -131,43 +130,6 @@
   // First register address. Following registers are below it on the stack.
   static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
 
-#elif defined(MIPS_ABI_O32)
-  // Offsets from frame_pointer() of function parameters and stored registers.
-  static const int kFramePointer = 0;
-
-  // Above the frame pointer - Stored registers and stack passed parameters.
-  // Registers s0 to s7, fp, and ra.
-  static const int kStoredRegisters = kFramePointer;
-  // Return address (stored from link register, read into pc on return).
-  static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
-  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
-  // Stack frame header.
-  static const int kStackFrameHeader = kReturnAddress + kPointerSize;
-  // Stack parameters placed by caller.
-  static const int kRegisterOutput =
-      kStackFrameHeader + 4 * kPointerSize + kPointerSize;
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
-  static const int kDirectCall = kStackHighEnd + kPointerSize;
-  static const int kIsolate = kDirectCall + kPointerSize;
-
-  // Below the frame pointer.
-  // Register parameters stored by setup code.
-  static const int kInputEnd = kFramePointer - kPointerSize;
-  static const int kInputStart = kInputEnd - kPointerSize;
-  static const int kStartIndex = kInputStart - kPointerSize;
-  static const int kInputString = kStartIndex - kPointerSize;
-  // When adding local variables remember to push space for them in
-  // the frame in GetCode.
-  static const int kSuccessfulCaptures = kInputString - kPointerSize;
-  static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
-  // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
-
-#else
-# error "undefined MIPS ABI"
-#endif
-
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
 
diff --git a/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index f3ddf7b..c05c580 100644
--- a/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -225,9 +225,8 @@
   BranchOrBacktrack(eq, on_equal);
 }
 
-
 void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ LoadP(r3, register_location(start_reg), r0);  // Index of start of capture
   __ LoadP(r4, register_location(start_reg + 1), r0);  // Index of end
@@ -322,7 +321,7 @@
     //   r3: Address byte_offset1 - Address captured substring's start.
     //   r4: Address byte_offset2 - Address of current character position.
     //   r5: size_t byte_length - length of capture in bytes(!)
-    //   r6: Isolate* isolate
+    //   r6: Isolate* isolate or 0 if the unicode flag is set.
 
     // Address of start of capture.
     __ add(r3, r3, end_of_input_address());
@@ -336,7 +335,14 @@
       __ sub(r4, r4, r25);
     }
     // Isolate.
-    __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ li(r6, Operand::Zero());
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+    }
 
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -845,8 +851,11 @@
           __ cmpi(current_input_offset(), Operand::Zero());
           __ beq(&exit_label_);
           // Advance current position after a zero-length match.
+          Label advance;
+          __ bind(&advance);
           __ addi(current_input_offset(), current_input_offset(),
                   Operand((mode_ == UC16) ? 2 : 1));
+          if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
         }
 
         __ b(&load_char_start_regexp);
diff --git a/src/regexp/ppc/regexp-macro-assembler-ppc.h b/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 4d1836f..d281387 100644
--- a/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -38,7 +38,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/src/regexp/regexp-ast.cc b/src/regexp/regexp-ast.cc
index 31c93b1..b5c2bb6 100644
--- a/src/regexp/regexp-ast.cc
+++ b/src/regexp/regexp-ast.cc
@@ -172,9 +172,9 @@
 
 
 void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
-  os_ << AsUC16(that.from());
+  os_ << AsUC32(that.from());
   if (!that.IsSingleton()) {
-    os_ << "-" << AsUC16(that.to());
+    os_ << "-" << AsUC32(that.to());
   }
 }
 
diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h
index f877785..0e718d3 100644
--- a/src/regexp/regexp-ast.h
+++ b/src/regexp/regexp-ast.h
@@ -5,6 +5,7 @@
 #ifndef V8_REGEXP_REGEXP_AST_H_
 #define V8_REGEXP_REGEXP_AST_H_
 
+#include "src/objects.h"
 #include "src/utils.h"
 #include "src/zone.h"
 
@@ -77,33 +78,38 @@
   CharacterRange() : from_(0), to_(0) {}
   // For compatibility with the CHECK_OK macro
   CharacterRange(void* null) { DCHECK_NULL(null); }  // NOLINT
-  CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) {}
   static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
                              Zone* zone);
   static Vector<const int> GetWordBounds();
-  static inline CharacterRange Singleton(uc16 value) {
+  static inline CharacterRange Singleton(uc32 value) {
     return CharacterRange(value, value);
   }
-  static inline CharacterRange Range(uc16 from, uc16 to) {
-    DCHECK(from <= to);
+  static inline CharacterRange Range(uc32 from, uc32 to) {
+    DCHECK(0 <= from && to <= String::kMaxCodePoint);
+    DCHECK(static_cast<uint32_t>(from) <= static_cast<uint32_t>(to));
     return CharacterRange(from, to);
   }
   static inline CharacterRange Everything() {
-    return CharacterRange(0, 0xFFFF);
+    return CharacterRange(0, String::kMaxCodePoint);
   }
-  bool Contains(uc16 i) { return from_ <= i && i <= to_; }
-  uc16 from() const { return from_; }
-  void set_from(uc16 value) { from_ = value; }
-  uc16 to() const { return to_; }
-  void set_to(uc16 value) { to_ = value; }
+  static inline ZoneList<CharacterRange>* List(Zone* zone,
+                                               CharacterRange range) {
+    ZoneList<CharacterRange>* list =
+        new (zone) ZoneList<CharacterRange>(1, zone);
+    list->Add(range, zone);
+    return list;
+  }
+  bool Contains(uc32 i) { return from_ <= i && i <= to_; }
+  uc32 from() const { return from_; }
+  void set_from(uc32 value) { from_ = value; }
+  uc32 to() const { return to_; }
+  void set_to(uc32 value) { to_ = value; }
   bool is_valid() { return from_ <= to_; }
   bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
   bool IsSingleton() { return (from_ == to_); }
-  void AddCaseEquivalents(Isolate* isolate, Zone* zone,
-                          ZoneList<CharacterRange>* ranges, bool is_one_byte);
-  static void Split(ZoneList<CharacterRange>* base, Vector<const int> overlay,
-                    ZoneList<CharacterRange>** included,
-                    ZoneList<CharacterRange>** excluded, Zone* zone);
+  static void AddCaseEquivalents(Isolate* isolate, Zone* zone,
+                                 ZoneList<CharacterRange>* ranges,
+                                 bool is_one_byte);
   // Whether a range list is in canonical form: Ranges ordered by from value,
   // and ranges non-overlapping and non-adjacent.
   static bool IsCanonical(ZoneList<CharacterRange>* ranges);
@@ -119,8 +125,10 @@
   static const int kPayloadMask = (1 << 24) - 1;
 
  private:
-  uc16 from_;
-  uc16 to_;
+  CharacterRange(uc32 from, uc32 to) : from_(from), to_(to) {}
+
+  uc32 from_;
+  uc32 to_;
 };
 
 
@@ -303,8 +311,8 @@
   // W : non-ASCII word character
   // d : ASCII digit
   // D : non-ASCII digit
-  // . : non-unicode non-newline
-  // * : All characters
+  // . : non-newline
+  // * : All characters, used for advancing an unanchored regexp
   uc16 standard_type() { return set_.standard_set_type(); }
   ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
   bool is_negated() { return is_negated_; }
@@ -451,6 +459,22 @@
   int capture_from() { return capture_from_; }
   Type type() { return type_; }
 
+  class Builder {
+   public:
+    Builder(bool is_positive, RegExpNode* on_success,
+            int stack_pointer_register, int position_register,
+            int capture_register_count = 0, int capture_register_start = 0);
+    RegExpNode* on_match_success() { return on_match_success_; }
+    RegExpNode* ForMatch(RegExpNode* match);
+
+   private:
+    bool is_positive_;
+    RegExpNode* on_match_success_;
+    RegExpNode* on_success_;
+    int stack_pointer_register_;
+    int position_register_;
+  };
+
  private:
   RegExpTree* body_;
   bool is_positive_;
diff --git a/src/regexp/regexp-macro-assembler-irregexp-inl.h b/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 4d0b1bc..a602129 100644
--- a/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -5,14 +5,14 @@
 #ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
 #define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
 
+#ifdef V8_INTERPRETED_REGEXP
+
 #include "src/ast/ast.h"
 #include "src/regexp/bytecodes-irregexp.h"
 
 namespace v8 {
 namespace internal {
 
-#ifdef V8_INTERPRETED_REGEXP
-
 void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
                                         uint32_t twenty_four_bits) {
   uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
@@ -54,9 +54,9 @@
   pc_ += 4;
 }
 
-#endif  // V8_INTERPRETED_REGEXP
-
 }  // namespace internal
 }  // namespace v8
 
+#endif  // V8_INTERPRETED_REGEXP
+
 #endif  // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/src/regexp/regexp-macro-assembler-irregexp.cc b/src/regexp/regexp-macro-assembler-irregexp.cc
index 751ee44..a0bb5e7 100644
--- a/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#ifdef V8_INTERPRETED_REGEXP
+
 #include "src/regexp/regexp-macro-assembler-irregexp.h"
 
 #include "src/ast/ast.h"
@@ -9,12 +11,9 @@
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
 
-
 namespace v8 {
 namespace internal {
 
-#ifdef V8_INTERPRETED_REGEXP
-
 RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
                                                            Vector<byte> buffer,
                                                            Zone* zone)
@@ -382,11 +381,13 @@
 
 
 void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_not_equal) {
+    int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
   DCHECK(start_reg >= 0);
   DCHECK(start_reg <= kMaxRegister);
-  Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
-                     : BC_CHECK_NOT_BACK_REF_NO_CASE,
+  Emit(read_backward ? (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD
+                                : BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD)
+                     : (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE
+                                : BC_CHECK_NOT_BACK_REF_NO_CASE),
        start_reg);
   EmitOrLink(on_not_equal);
 }
@@ -454,7 +455,7 @@
   }
 }
 
-#endif  // V8_INTERPRETED_REGEXP
-
 }  // namespace internal
 }  // namespace v8
+
+#endif  // V8_INTERPRETED_REGEXP
diff --git a/src/regexp/regexp-macro-assembler-irregexp.h b/src/regexp/regexp-macro-assembler-irregexp.h
index f1ace63..dad2e9a 100644
--- a/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/src/regexp/regexp-macro-assembler-irregexp.h
@@ -5,13 +5,13 @@
 #ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
 #define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
 
+#ifdef V8_INTERPRETED_REGEXP
+
 #include "src/regexp/regexp-macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-#ifdef V8_INTERPRETED_REGEXP
-
 // A light-weight assembler for the Irregexp byte code.
 class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
  public:
@@ -85,7 +85,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
   virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
@@ -125,9 +125,9 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
 };
 
-#endif  // V8_INTERPRETED_REGEXP
-
 }  // namespace internal
 }  // namespace v8
 
+#endif  // V8_INTERPRETED_REGEXP
+
 #endif  // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/src/regexp/regexp-macro-assembler-tracer.cc b/src/regexp/regexp-macro-assembler-tracer.cc
index 5301ead..ec86526 100644
--- a/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/src/regexp/regexp-macro-assembler-tracer.cc
@@ -360,11 +360,11 @@
 
 
 void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
-  PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+  PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s %s, label[%08x]);\n",
          start_reg, read_backward ? "backward" : "forward",
-         LabelToInt(on_no_match));
-  assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
+         unicode ? "unicode" : "non-unicode", LabelToInt(on_no_match));
+  assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward, unicode,
                                               on_no_match);
 }
 
diff --git a/src/regexp/regexp-macro-assembler-tracer.h b/src/regexp/regexp-macro-assembler-tracer.h
index 77377aa..8a9ebe3 100644
--- a/src/regexp/regexp-macro-assembler-tracer.h
+++ b/src/regexp/regexp-macro-assembler-tracer.h
@@ -34,7 +34,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc
index caf8b51..9bb5073 100644
--- a/src/regexp/regexp-macro-assembler.cc
+++ b/src/regexp/regexp-macro-assembler.cc
@@ -9,6 +9,10 @@
 #include "src/regexp/regexp-stack.h"
 #include "src/simulator.h"
 
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uchar.h"
+#endif  // V8_I18N_SUPPORT
+
 namespace v8 {
 namespace internal {
 
@@ -23,6 +27,80 @@
 }
 
 
+int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
+                                                     Address byte_offset2,
+                                                     size_t byte_length,
+                                                     Isolate* isolate) {
+  unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+      isolate->regexp_macro_assembler_canonicalize();
+  // This function is not allowed to cause a garbage collection.
+  // A GC might move the calling generated code and invalidate the
+  // return address on the stack.
+  DCHECK(byte_length % 2 == 0);
+  uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
+  uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
+  size_t length = byte_length >> 1;
+
+#ifdef V8_I18N_SUPPORT
+  if (isolate == nullptr) {
+    for (size_t i = 0; i < length; i++) {
+      uc32 c1 = substring1[i];
+      uc32 c2 = substring2[i];
+      if (unibrow::Utf16::IsLeadSurrogate(c1)) {
+        // Non-BMP characters do not have case-equivalents in the BMP.
+        // Both have to be non-BMP for them to be able to match.
+        if (!unibrow::Utf16::IsLeadSurrogate(c2)) return 0;
+        if (i + 1 < length) {
+          uc16 c1t = substring1[i + 1];
+          uc16 c2t = substring2[i + 1];
+          if (unibrow::Utf16::IsTrailSurrogate(c1t) &&
+              unibrow::Utf16::IsTrailSurrogate(c2t)) {
+            c1 = unibrow::Utf16::CombineSurrogatePair(c1, c1t);
+            c2 = unibrow::Utf16::CombineSurrogatePair(c2, c2t);
+            i++;
+          }
+        }
+      }
+      c1 = u_foldCase(c1, U_FOLD_CASE_DEFAULT);
+      c2 = u_foldCase(c2, U_FOLD_CASE_DEFAULT);
+      if (c1 != c2) return 0;
+    }
+    return 1;
+  }
+#endif  // V8_I18N_SUPPORT
+  DCHECK_NOT_NULL(isolate);
+  for (size_t i = 0; i < length; i++) {
+    unibrow::uchar c1 = substring1[i];
+    unibrow::uchar c2 = substring2[i];
+    if (c1 != c2) {
+      unibrow::uchar s1[1] = {c1};
+      canonicalize->get(c1, '\0', s1);
+      if (s1[0] != c2) {
+        unibrow::uchar s2[1] = {c2};
+        canonicalize->get(c2, '\0', s2);
+        if (s1[0] != s2[0]) {
+          return 0;
+        }
+      }
+    }
+  }
+  return 1;
+}
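
The non-ICU fallback loop above, reduced to a self-contained sketch with a toy ASCII canonicalization standing in for unibrow::Ecma262Canonicalize:

#include <cctype>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns 1 if the buffers are equal under the toy canonicalization, else 0.
static int CaseInsensitiveCompare(const uint16_t* s1, const uint16_t* s2,
                                  size_t length) {
  for (size_t i = 0; i < length; i++) {
    uint32_t c1 = s1[i];
    uint32_t c2 = s2[i];
    if (c1 != c2) {
      uint32_t f1 = c1 < 128 ? std::toupper(static_cast<int>(c1)) : c1;
      uint32_t f2 = c2 < 128 ? std::toupper(static_cast<int>(c2)) : c2;
      if (f1 != f2) return 0;  // mismatch even after canonicalization
    }
  }
  return 1;
}

int main() {
  const uint16_t a[] = {'R', 'e', 'g', 'E', 'x', 'p'};
  const uint16_t b[] = {'r', 'E', 'g', 'e', 'X', 'P'};
  std::printf("%d\n", CaseInsensitiveCompare(a, b, 6));  // prints 1
}
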
+
+
+void RegExpMacroAssembler::CheckNotInSurrogatePair(int cp_offset,
+                                                   Label* on_failure) {
+  Label ok;
+  // Check that current character is not a trail surrogate.
+  LoadCurrentCharacter(cp_offset, &ok);
+  CheckCharacterNotInRange(kTrailSurrogateStart, kTrailSurrogateEnd, &ok);
+  // Check that previous character is not a lead surrogate.
+  LoadCurrentCharacter(cp_offset - 1, &ok);
+  CheckCharacterInRange(kLeadSurrogateStart, kLeadSurrogateEnd, on_failure);
+  Bind(&ok);
+}
+
+
 #ifndef V8_INTERPRETED_REGEXP  // Avoid unused code, e.g., on ARM.
 
 NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
@@ -245,40 +323,6 @@
 };
 
 
-int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
-    Address byte_offset1,
-    Address byte_offset2,
-    size_t byte_length,
-    Isolate* isolate) {
-  unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
-      isolate->regexp_macro_assembler_canonicalize();
-  // This function is not allowed to cause a garbage collection.
-  // A GC might move the calling generated code and invalidate the
-  // return address on the stack.
-  DCHECK(byte_length % 2 == 0);
-  uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
-  uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
-  size_t length = byte_length >> 1;
-
-  for (size_t i = 0; i < length; i++) {
-    unibrow::uchar c1 = substring1[i];
-    unibrow::uchar c2 = substring2[i];
-    if (c1 != c2) {
-      unibrow::uchar s1[1] = { c1 };
-      canonicalize->get(c1, '\0', s1);
-      if (s1[0] != c2) {
-        unibrow::uchar s2[1] = { c2 };
-        canonicalize->get(c2, '\0', s2);
-        if (s1[0] != s2[0]) {
-          return 0;
-        }
-      }
-    }
-  }
-  return 1;
-}
-
-
 Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
                                               Address* stack_base,
                                               Isolate* isolate) {
diff --git a/src/regexp/regexp-macro-assembler.h b/src/regexp/regexp-macro-assembler.h
index 2059933..6f79a16 100644
--- a/src/regexp/regexp-macro-assembler.h
+++ b/src/regexp/regexp-macro-assembler.h
@@ -11,6 +11,13 @@
 namespace v8 {
 namespace internal {
 
+static const uc32 kLeadSurrogateStart = 0xd800;
+static const uc32 kLeadSurrogateEnd = 0xdbff;
+static const uc32 kTrailSurrogateStart = 0xdc00;
+static const uc32 kTrailSurrogateEnd = 0xdfff;
+static const uc32 kNonBmpStart = 0x10000;
+static const uc32 kNonBmpEnd = 0x10ffff;
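
For reference, the arithmetic that relates these constants to non-BMP code points, shown as a standalone sketch of the standard UTF-16 decoding formula rather than V8's unibrow::Utf16 API:

#include <cstdint>
#include <cstdio>

static uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
  return 0x10000u + ((lead - 0xD800u) << 10) + (trail - 0xDC00u);
}

int main() {
  // U+1F600 is encoded as the pair 0xD83D 0xDE00 in UTF-16.
  std::printf("U+%X\n", CombineSurrogatePair(0xD83D, 0xDE00));  // U+1F600
}
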
+
 struct DisjunctDecisionRow {
   RegExpCharacterClass cc;
   Label* on_match;
@@ -76,7 +83,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match) = 0;
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match) = 0;
   // Check the current character for a match with a literal character.  If we
   // fail to match then goto the on_failure label.  End of input always
@@ -146,25 +153,40 @@
   virtual void ClearRegisters(int reg_from, int reg_to) = 0;
   virtual void WriteStackPointerToRegister(int reg) = 0;
 
+  // Compares two-byte strings case insensitively.
+  // Called from generated RegExp code.
+  static int CaseInsensitiveCompareUC16(Address byte_offset1,
+                                        Address byte_offset2,
+                                        size_t byte_length, Isolate* isolate);
+
+  // Check that we are not in the middle of a surrogate pair.
+  void CheckNotInSurrogatePair(int cp_offset, Label* on_failure);
+
   // Controls the generation of large inlined constants in the code.
   void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
   bool slow_safe() { return slow_safe_compiler_; }
 
-  enum GlobalMode { NOT_GLOBAL, GLOBAL, GLOBAL_NO_ZERO_LENGTH_CHECK };
+  enum GlobalMode {
+    NOT_GLOBAL,
+    GLOBAL_NO_ZERO_LENGTH_CHECK,
+    GLOBAL,
+    GLOBAL_UNICODE
+  };
   // Set whether the regular expression has the global flag.  Exiting due to
   // a failure in a global regexp may still mean success overall.
   inline void set_global_mode(GlobalMode mode) { global_mode_ = mode; }
   inline bool global() { return global_mode_ != NOT_GLOBAL; }
   inline bool global_with_zero_length_check() {
-    return global_mode_ == GLOBAL;
+    return global_mode_ == GLOBAL || global_mode_ == GLOBAL_UNICODE;
   }
+  inline bool global_unicode() { return global_mode_ == GLOBAL_UNICODE; }
 
   Isolate* isolate() const { return isolate_; }
   Zone* zone() const { return zone_; }
 
  private:
   bool slow_safe_compiler_;
-  bool global_mode_;
+  GlobalMode global_mode_;
   Isolate* isolate_;
   Zone* zone_;
 };
@@ -199,13 +221,6 @@
                       int previous_index,
                       Isolate* isolate);
 
-  // Compares two-byte strings case insensitively.
-  // Called from generated RegExp code.
-  static int CaseInsensitiveCompareUC16(Address byte_offset1,
-                                        Address byte_offset2,
-                                        size_t byte_length,
-                                        Isolate* isolate);
-
   // Called from RegExp if the backtrack stack limit is hit.
   // Tries to expand the stack. Returns the new stack-pointer if
   // successful, and updates the stack_top address, or returns 0 if unable
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index fa89003..46c593c 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -8,27 +8,32 @@
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
+#include "src/ostreams.h"
 #include "src/regexp/jsregexp.h"
 #include "src/utils.h"
 
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uset.h"
+#endif  // V8_I18N_SUPPORT
+
 namespace v8 {
 namespace internal {
 
 RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
-                           bool multiline, bool unicode, Isolate* isolate,
-                           Zone* zone)
+                           JSRegExp::Flags flags, Isolate* isolate, Zone* zone)
     : isolate_(isolate),
       zone_(zone),
       error_(error),
       captures_(NULL),
       in_(in),
       current_(kEndMarker),
+      ignore_case_(flags & JSRegExp::kIgnoreCase),
+      multiline_(flags & JSRegExp::kMultiline),
+      unicode_(flags & JSRegExp::kUnicode),
       next_pos_(0),
       captures_started_(0),
       capture_count_(0),
       has_more_(true),
-      multiline_(multiline),
-      unicode_(unicode),
       simple_(false),
       contains_anchor_(false),
       is_scanned_for_captures_(false),
@@ -36,10 +41,28 @@
   Advance();
 }
 
+template <bool update_position>
+inline uc32 RegExpParser::ReadNext() {
+  int position = next_pos_;
+  uc32 c0 = in()->Get(position);
+  position++;
+  // With the unicode flag, read the whole surrogate pair if possible.
+  if (unicode() && position < in()->length() &&
+      unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(c0))) {
+    uc16 c1 = in()->Get(position);
+    if (unibrow::Utf16::IsTrailSurrogate(c1)) {
+      c0 = unibrow::Utf16::CombineSurrogatePair(static_cast<uc16>(c0), c1);
+      position++;
+    }
+  }
+  if (update_position) next_pos_ = position;
+  return c0;
+}
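
The same read, as a self-contained sketch over a raw UTF-16 buffer (toy stand-ins for FlatStringReader and unibrow::Utf16, not the parser's actual types):

#include <cstdint>
#include <cstdio>

static bool IsLead(uint16_t c) { return c >= 0xD800 && c <= 0xDBFF; }
static bool IsTrail(uint16_t c) { return c >= 0xDC00 && c <= 0xDFFF; }

// Reads one code point, consuming a full surrogate pair when the unicode
// flag is set; advances *pos by one or two code units.
static uint32_t ReadNext(const uint16_t* in, int length, int* pos,
                         bool unicode) {
  uint32_t c0 = in[(*pos)++];
  if (unicode && *pos < length && IsLead(static_cast<uint16_t>(c0)) &&
      IsTrail(in[*pos])) {
    c0 = 0x10000u + ((c0 - 0xD800u) << 10) + (in[(*pos)++] - 0xDC00u);
  }
  return c0;
}

int main() {
  const uint16_t buf[] = {0xD83D, 0xDE00, 'a'};
  int pos = 0;
  std::printf("U+%X\n", ReadNext(buf, 3, &pos, true));  // U+1F600, pos == 2
  std::printf("U+%X\n", ReadNext(buf, 3, &pos, true));  // U+61 ('a')
}
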
+
 
 uc32 RegExpParser::Next() {
   if (has_next()) {
-    return in()->Get(next_pos_);
+    return ReadNext<false>();
   } else {
     return kEndMarker;
   }
@@ -47,25 +70,14 @@
 
 
 void RegExpParser::Advance() {
-  if (next_pos_ < in()->length()) {
+  if (has_next()) {
     StackLimitCheck check(isolate());
     if (check.HasOverflowed()) {
       ReportError(CStrVector(Isolate::kStackOverflowMessage));
     } else if (zone()->excess_allocation()) {
       ReportError(CStrVector("Regular expression too large"));
     } else {
-      current_ = in()->Get(next_pos_);
-      next_pos_++;
-      // Read the whole surrogate pair in case of unicode flag, if possible.
-      if (unicode_ && next_pos_ < in()->length() &&
-          unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(current_))) {
-        uc16 trail = in()->Get(next_pos_);
-        if (unibrow::Utf16::IsTrailSurrogate(trail)) {
-          current_ = unibrow::Utf16::CombineSurrogatePair(
-              static_cast<uc16>(current_), trail);
-          next_pos_++;
-        }
-      }
+      current_ = ReadNext<true>();
     }
   } else {
     current_ = kEndMarker;
@@ -92,11 +104,28 @@
 
 bool RegExpParser::simple() { return simple_; }
 
-
-bool RegExpParser::IsSyntaxCharacter(uc32 c) {
-  return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
-         c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
-         c == '{' || c == '}' || c == '|';
+bool RegExpParser::IsSyntaxCharacterOrSlash(uc32 c) {
+  switch (c) {
+    case '^':
+    case '$':
+    case '\\':
+    case '.':
+    case '*':
+    case '+':
+    case '?':
+    case '(':
+    case ')':
+    case '[':
+    case ']':
+    case '{':
+    case '}':
+    case '|':
+    case '/':
+      return true;
+    default:
+      break;
+  }
+  return false;
 }
 
 
@@ -142,7 +171,7 @@
 RegExpTree* RegExpParser::ParseDisjunction() {
   // Used to store current state while parsing subexpressions.
   RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
-                                  zone());
+                                  ignore_case(), unicode(), zone());
   RegExpParserState* state = &initial_state;
   // Cache the builder in a local variable for quick access.
   RegExpBuilder* builder = initial_state.builder();
@@ -151,14 +180,14 @@
       case kEndMarker:
         if (state->IsSubexpression()) {
           // Inside a parenthesized group when hitting end of input.
-          ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+          return ReportError(CStrVector("Unterminated group"));
         }
         DCHECK_EQ(INITIAL, state->group_type());
         // Parsing completed successfully.
         return builder->ToRegExp();
       case ')': {
         if (!state->IsSubexpression()) {
-          ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+          return ReportError(CStrVector("Unmatched ')'"));
         }
         DCHECK_NE(INITIAL, state->group_type());
 
@@ -206,7 +235,7 @@
         return ReportError(CStrVector("Nothing to repeat"));
       case '^': {
         Advance();
-        if (multiline_) {
+        if (multiline()) {
           builder->AddAssertion(
               new (zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
         } else {
@@ -219,8 +248,8 @@
       case '$': {
         Advance();
         RegExpAssertion::AssertionType assertion_type =
-            multiline_ ? RegExpAssertion::END_OF_LINE
-                       : RegExpAssertion::END_OF_INPUT;
+            multiline() ? RegExpAssertion::END_OF_LINE
+                        : RegExpAssertion::END_OF_INPUT;
         builder->AddAssertion(new (zone()) RegExpAssertion(assertion_type));
         continue;
       }
@@ -230,8 +259,9 @@
         ZoneList<CharacterRange>* ranges =
             new (zone()) ZoneList<CharacterRange>(2, zone());
         CharacterRange::AddClassEscape('.', ranges, zone());
-        RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
-        builder->AddAtom(atom);
+        RegExpCharacterClass* cc =
+            new (zone()) RegExpCharacterClass(ranges, false);
+        builder->AddCharacterClass(cc);
         break;
       }
       case '(': {
@@ -265,25 +295,25 @@
               }
             // Fall through.
             default:
-              ReportError(CStrVector("Invalid group") CHECK_FAILED);
-              break;
+              return ReportError(CStrVector("Invalid group"));
           }
           Advance(2);
         } else {
           if (captures_started_ >= kMaxCaptures) {
-            ReportError(CStrVector("Too many captures") CHECK_FAILED);
+            return ReportError(CStrVector("Too many captures"));
           }
           captures_started_++;
         }
         // Store current state and begin new disjunction parsing.
         state = new (zone()) RegExpParserState(
-            state, subexpr_type, lookaround_type, captures_started_, zone());
+            state, subexpr_type, lookaround_type, captures_started_,
+            ignore_case(), unicode(), zone());
         builder = state->builder();
         continue;
       }
       case '[': {
-        RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
-        builder->AddAtom(atom);
+        RegExpTree* cc = ParseCharacterClass(CHECK_FAILED);
+        builder->AddCharacterClass(cc->AsCharacterClass());
         break;
       }
       // Atom ::
@@ -318,8 +348,30 @@
             ZoneList<CharacterRange>* ranges =
                 new (zone()) ZoneList<CharacterRange>(2, zone());
             CharacterRange::AddClassEscape(c, ranges, zone());
-            RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
-            builder->AddAtom(atom);
+            RegExpCharacterClass* cc =
+                new (zone()) RegExpCharacterClass(ranges, false);
+            builder->AddCharacterClass(cc);
+            break;
+          }
+          case 'p':
+          case 'P': {
+            uc32 p = Next();
+            Advance(2);
+            if (unicode()) {
+              if (FLAG_harmony_regexp_property) {
+                ZoneList<CharacterRange>* ranges = ParsePropertyClass();
+                if (ranges == nullptr) {
+                  return ReportError(CStrVector("Invalid property name"));
+                }
+                RegExpCharacterClass* cc =
+                    new (zone()) RegExpCharacterClass(ranges, p == 'P');
+                builder->AddCharacterClass(cc);
+              } else {
+                return ReportError(CStrVector("Invalid escape"));
+              }
+            } else {
+              builder->AddCharacter(p);
+            }
             break;
           }
           case '1':
@@ -332,7 +384,8 @@
           case '8':
           case '9': {
             int index = 0;
-            if (ParseBackReferenceIndex(&index)) {
+            bool is_backref = ParseBackReferenceIndex(&index CHECK_FAILED);
+            if (is_backref) {
               if (state->IsInsideCaptureGroup(index)) {
                 // The back reference is inside the capture group it refers to.
                 // Nothing can possibly have been captured yet, so we use empty
@@ -347,24 +400,25 @@
               }
               break;
             }
+            // With /u, no identity escapes except for syntax characters
+            // are allowed. Otherwise, all identity escapes are allowed.
+            if (unicode()) {
+              return ReportError(CStrVector("Invalid escape"));
+            }
             uc32 first_digit = Next();
             if (first_digit == '8' || first_digit == '9') {
-              // If the 'u' flag is present, only syntax characters can be
-              // escaped,
-              // no other identity escapes are allowed. If the 'u' flag is not
-              // present, all identity escapes are allowed.
-              if (!unicode_) {
-                builder->AddCharacter(first_digit);
-                Advance(2);
-              } else {
-                return ReportError(CStrVector("Invalid escape"));
-              }
+              builder->AddCharacter(first_digit);
+              Advance(2);
               break;
             }
           }
           // FALLTHROUGH
           case '0': {
             Advance();
+            if (unicode() && Next() >= '0' && Next() <= '9') {
+              // With /u, decimal escapes with a leading 0 are not parsed as octal.
+              return ReportError(CStrVector("Invalid decimal escape"));
+            }
             uc32 octal = ParseOctalLiteral();
             builder->AddCharacter(octal);
             break;
@@ -402,6 +456,10 @@
               // This is outside the specification. We match JSC in
               // reading the backslash as a literal character instead
               // of as starting an escape.
+              if (unicode()) {
+                // With /u, invalid escapes are not treated as identity escapes.
+                return ReportError(CStrVector("Invalid unicode escape"));
+              }
               builder->AddCharacter('\\');
             } else {
               Advance(2);
@@ -414,11 +472,10 @@
             uc32 value;
             if (ParseHexEscape(2, &value)) {
               builder->AddCharacter(value);
-            } else if (!unicode_) {
+            } else if (!unicode()) {
               builder->AddCharacter('x');
             } else {
-              // If the 'u' flag is present, invalid escapes are not treated as
-              // identity escapes.
+              // With /u, invalid escapes are not treated as identity escapes.
               return ReportError(CStrVector("Invalid escape"));
             }
             break;
@@ -427,24 +484,20 @@
             Advance(2);
             uc32 value;
             if (ParseUnicodeEscape(&value)) {
-              builder->AddUnicodeCharacter(value);
-            } else if (!unicode_) {
+              builder->AddEscapedUnicodeCharacter(value);
+            } else if (!unicode()) {
               builder->AddCharacter('u');
             } else {
-              // If the 'u' flag is present, invalid escapes are not treated as
-              // identity escapes.
+              // With /u, invalid escapes are not treated as identity escapes.
               return ReportError(CStrVector("Invalid unicode escape"));
             }
             break;
           }
           default:
             Advance();
-            // If the 'u' flag is present, only syntax characters can be
-            // escaped, no
-            // other identity escapes are allowed. If the 'u' flag is not
-            // present,
-            // all identity escapes are allowed.
-            if (!unicode_ || IsSyntaxCharacter(current())) {
+            // With /u, no identity escapes except for syntax characters
+            // are allowed. Otherwise, all identity escapes are allowed.
+            if (!unicode() || IsSyntaxCharacterOrSlash(current())) {
               builder->AddCharacter(current());
               Advance();
             } else {
@@ -456,10 +509,16 @@
       case '{': {
         int dummy;
         if (ParseIntervalQuantifier(&dummy, &dummy)) {
-          ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+          return ReportError(CStrVector("Nothing to repeat"));
         }
         // fallthrough
       }
+      case '}':
+      case ']':
+        if (unicode()) {
+          return ReportError(CStrVector("Lone quantifier brackets"));
+        }
+      // fallthrough
       default:
         builder->AddUnicodeCharacter(current());
         Advance();
@@ -492,13 +551,15 @@
       case '{':
         if (ParseIntervalQuantifier(&min, &max)) {
           if (max < min) {
-            ReportError(CStrVector("numbers out of order in {} quantifier.")
-                            CHECK_FAILED);
+            return ReportError(
+                CStrVector("numbers out of order in {} quantifier"));
           }
           break;
-        } else {
-          continue;
+        } else if (unicode()) {
+          // With /u, incomplete quantifiers are not allowed.
+          return ReportError(CStrVector("Incomplete quantifier"));
         }
+        continue;
       default:
         continue;
     }
@@ -511,7 +572,9 @@
       quantifier_type = RegExpQuantifier::POSSESSIVE;
       Advance();
     }
-    builder->AddQuantifierToAtom(min, max, quantifier_type);
+    if (!builder->AddQuantifierToAtom(min, max, quantifier_type)) {
+      return ReportError(CStrVector("Invalid quantifier"));
+    }
   }
 }
 
@@ -740,12 +803,12 @@
   return true;
 }
 
-
+// This parses RegExpUnicodeEscapeSequence as described in ECMA262.
 bool RegExpParser::ParseUnicodeEscape(uc32* value) {
   // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
   // allowed). In the latter case, the number of hex digits between { } is
   // arbitrary. \ and u have already been read.
-  if (current() == '{' && unicode_) {
+  if (current() == '{' && unicode()) {
     int start = position();
     Advance();
     if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
@@ -758,9 +821,75 @@
     return false;
   }
   // \u but no {, or \u{...} escapes not allowed.
-  return ParseHexEscape(4, value);
+  bool result = ParseHexEscape(4, value);
+  if (result && unicode() && unibrow::Utf16::IsLeadSurrogate(*value) &&
+      current() == '\\') {
+    // Attempt to read trail surrogate.
+    int start = position();
+    if (Next() == 'u') {
+      Advance(2);
+      uc32 trail;
+      if (ParseHexEscape(4, &trail) &&
+          unibrow::Utf16::IsTrailSurrogate(trail)) {
+        *value = unibrow::Utf16::CombineSurrogatePair(static_cast<uc16>(*value),
+                                                      static_cast<uc16>(trail));
+        return true;
+      }
+    }
+    Reset(start);
+  }
+  return result;
 }
 
+ZoneList<CharacterRange>* RegExpParser::ParsePropertyClass() {
+#ifdef V8_I18N_SUPPORT
+  char property_name[3];
+  memset(property_name, 0, sizeof(property_name));
+  if (current() == '{') {
+    Advance();
+    if (current() < 'A' || current() > 'Z') return nullptr;
+    property_name[0] = static_cast<char>(current());
+    Advance();
+    if (current() >= 'a' && current() <= 'z') {
+      property_name[1] = static_cast<char>(current());
+      Advance();
+    }
+    if (current() != '}') return nullptr;
+  } else if (current() >= 'A' && current() <= 'Z') {
+    property_name[0] = static_cast<char>(current());
+  } else {
+    return nullptr;
+  }
+  Advance();
+
+  int32_t category =
+      u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, property_name);
+  if (category == UCHAR_INVALID_CODE) return nullptr;
+
+  USet* set = uset_openEmpty();
+  UErrorCode ec = U_ZERO_ERROR;
+  uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category, &ec);
+  ZoneList<CharacterRange>* ranges = nullptr;
+  if (ec == U_ZERO_ERROR && !uset_isEmpty(set)) {
+    uset_removeAllStrings(set);
+    int item_count = uset_getItemCount(set);
+    ranges = new (zone()) ZoneList<CharacterRange>(item_count, zone());
+    int item_result = 0;
+    for (int i = 0; i < item_count; i++) {
+      uc32 start = 0;
+      uc32 end = 0;
+      item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
+      ranges->Add(CharacterRange::Range(start, end), zone());
+    }
+    DCHECK_EQ(U_ZERO_ERROR, ec);
+    DCHECK_EQ(0, item_result);
+  }
+  uset_close(set);
+  return ranges;
+#else   // V8_I18N_SUPPORT
+  return nullptr;
+#endif  // V8_I18N_SUPPORT
+}
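
The ParsePropertyClass helper added above maps a one- or two-letter general-category name such as "L" or "Lu" to character ranges through ICU. A minimal standalone sketch of the same ICU pattern, assuming a plain C++ program linked against ICU (-licuuc) rather than V8's zone allocation:

    #include <unicode/uchar.h>
    #include <unicode/uset.h>
    #include <cstdio>

    int main() {
      // Resolve the category alias, as the parser does for property_name.
      int32_t category =
          u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, "Lu");
      if (category == UCHAR_INVALID_CODE) return 1;

      UErrorCode ec = U_ZERO_ERROR;
      USet* set = uset_openEmpty();
      uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category,
                                 &ec);
      if (U_SUCCESS(ec)) {
        uset_removeAllStrings(set);
        int32_t item_count = uset_getItemCount(set);
        for (int32_t i = 0; i < item_count; i++) {
          UChar32 start, end;
          uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
          std::printf("[%04X-%04X]\n", static_cast<unsigned>(start),
                      static_cast<unsigned>(end));
        }
      }
      uset_close(set);
      return 0;
    }
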
 
 bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
   uc32 x = 0;
@@ -809,20 +938,35 @@
     case 'c': {
       uc32 controlLetter = Next();
       uc32 letter = controlLetter & ~('A' ^ 'a');
-      // For compatibility with JSC, inside a character class
-      // we also accept digits and underscore as control characters.
-      if ((controlLetter >= '0' && controlLetter <= '9') ||
-          controlLetter == '_' || (letter >= 'A' && letter <= 'Z')) {
+      // For compatibility with JSC, inside a character class we also accept
+      // digits and underscore as control characters, unless /u is set.
+      if (letter >= 'A' && letter <= 'Z') {
         Advance(2);
         // Control letters mapped to ASCII control characters in the range
         // 0x00-0x1f.
         return controlLetter & 0x1f;
       }
+      if (unicode()) {
+        // With /u, invalid escapes are not treated as identity escapes.
+        ReportError(CStrVector("Invalid class escape"));
+        return 0;
+      }
+      if ((controlLetter >= '0' && controlLetter <= '9') ||
+          controlLetter == '_') {
+        Advance(2);
+        return controlLetter & 0x1f;
+      }
       // We match JSC in reading the backslash as a literal
       // character instead of as starting an escape.
       return '\\';
     }
     case '0':
+      // With /u, \0 is interpreted as NUL if not followed by another digit.
+      if (unicode() && !(Next() >= '0' && Next() <= '9')) {
+        Advance();
+        return 0;
+      }
+    // Fall through.
     case '1':
     case '2':
     case '3':
@@ -833,43 +977,43 @@
       // For compatibility, we interpret a decimal escape that isn't
       // a back reference (and therefore either \0 or not valid according
       // to the specification) as a 1..3 digit octal character code.
+      if (unicode()) {
+        // With /u, decimal escapes are not interpreted as octal character codes.
+        ReportError(CStrVector("Invalid class escape"));
+        return 0;
+      }
       return ParseOctalLiteral();
     case 'x': {
       Advance();
       uc32 value;
-      if (ParseHexEscape(2, &value)) {
-        return value;
+      if (ParseHexEscape(2, &value)) return value;
+      if (unicode()) {
+        // With /u, invalid escapes are not treated as identity escapes.
+        ReportError(CStrVector("Invalid escape"));
+        return 0;
       }
-      if (!unicode_) {
-        // If \x is not followed by a two-digit hexadecimal, treat it
-        // as an identity escape.
-        return 'x';
-      }
-      // If the 'u' flag is present, invalid escapes are not treated as
-      // identity escapes.
-      ReportError(CStrVector("Invalid escape"));
-      return 0;
+      // If \x is not followed by a two-digit hexadecimal, treat it
+      // as an identity escape.
+      return 'x';
     }
     case 'u': {
       Advance();
       uc32 value;
-      if (ParseUnicodeEscape(&value)) {
-        return value;
+      if (ParseUnicodeEscape(&value)) return value;
+      if (unicode()) {
+        // With /u, invalid escapes are not treated as identity escapes.
+        ReportError(CStrVector("Invalid unicode escape"));
+        return 0;
       }
-      if (!unicode_) {
-        return 'u';
-      }
-      // If the 'u' flag is present, invalid escapes are not treated as
-      // identity escapes.
-      ReportError(CStrVector("Invalid unicode escape"));
-      return 0;
+      // If \u is not followed by a four-digit hexadecimal, treat it
+      // as an identity escape.
+      return 'u';
     }
     default: {
       uc32 result = current();
-      // If the 'u' flag is present, only syntax characters can be escaped, no
-      // other identity escapes are allowed. If the 'u' flag is not present, all
-      // identity escapes are allowed.
-      if (!unicode_ || IsSyntaxCharacter(result)) {
+      // With /u, no identity escapes except for syntax characters and '-' are
+      // allowed. Otherwise, all identity escapes are allowed.
+      if (!unicode() || IsSyntaxCharacterOrSlash(result) || result == '-') {
         Advance();
         return result;
       }
@@ -899,13 +1043,13 @@
       case kEndMarker:
         return ReportError(CStrVector("\\ at end of pattern"));
       default:
-        uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
-        return CharacterRange::Singleton(c);
+        first = ParseClassCharacterEscape(CHECK_FAILED);
     }
   } else {
     Advance();
-    return CharacterRange::Singleton(first);
   }
+
+  return CharacterRange::Singleton(first);
 }
 
 
@@ -927,6 +1071,7 @@
 
 RegExpTree* RegExpParser::ParseCharacterClass() {
   static const char* kUnterminated = "Unterminated character class";
+  static const char* kRangeInvalid = "Invalid character class";
   static const char* kRangeOutOfOrder = "Range out of order in character class";
 
   DCHECK_EQ(current(), '[');
@@ -956,13 +1101,18 @@
       CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
       if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
         // Either end is an escaped character class. Treat the '-' verbatim.
+        if (unicode()) {
+          // ES2015 21.2.2.15.1 step 1.
+          return ReportError(CStrVector(kRangeInvalid));
+        }
         AddRangeOrEscape(ranges, char_class, first, zone());
         ranges->Add(CharacterRange::Singleton('-'), zone());
         AddRangeOrEscape(ranges, char_class_2, next, zone());
         continue;
       }
+      // ES2015 21.2.2.15.1 step 6.
       if (first.from() > next.to()) {
-        return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
+        return ReportError(CStrVector(kRangeOutOfOrder));
       }
       ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
     } else {
@@ -970,7 +1120,7 @@
     }
   }
   if (!has_more()) {
-    return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
+    return ReportError(CStrVector(kUnterminated));
   }
   Advance();
   if (ranges->length() == 0) {
@@ -985,10 +1135,10 @@
 
 
 bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
-                               FlatStringReader* input, bool multiline,
-                               bool unicode, RegExpCompileData* result) {
+                               FlatStringReader* input, JSRegExp::Flags flags,
+                               RegExpCompileData* result) {
   DCHECK(result != NULL);
-  RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
+  RegExpParser parser(input, &result->error, flags, isolate, zone);
   RegExpTree* tree = parser.ParsePattern();
   if (parser.failed()) {
     DCHECK(tree == NULL);
@@ -1010,11 +1160,13 @@
   return !parser.failed();
 }
 
-
-RegExpBuilder::RegExpBuilder(Zone* zone)
+RegExpBuilder::RegExpBuilder(Zone* zone, bool ignore_case, bool unicode)
     : zone_(zone),
       pending_empty_(false),
+      ignore_case_(ignore_case),
+      unicode_(unicode),
       characters_(NULL),
+      pending_surrogate_(kNoPendingSurrogate),
       terms_(),
       alternatives_()
 #ifdef DEBUG
@@ -1025,7 +1177,51 @@
 }
 
 
+void RegExpBuilder::AddLeadSurrogate(uc16 lead_surrogate) {
+  DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+  FlushPendingSurrogate();
+  // Hold onto the lead surrogate, waiting for a trail surrogate to follow.
+  pending_surrogate_ = lead_surrogate;
+}
+
+
+void RegExpBuilder::AddTrailSurrogate(uc16 trail_surrogate) {
+  DCHECK(unibrow::Utf16::IsTrailSurrogate(trail_surrogate));
+  if (pending_surrogate_ != kNoPendingSurrogate) {
+    uc16 lead_surrogate = pending_surrogate_;
+    pending_surrogate_ = kNoPendingSurrogate;
+    DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+    uc32 combined =
+        unibrow::Utf16::CombineSurrogatePair(lead_surrogate, trail_surrogate);
+    if (NeedsDesugaringForIgnoreCase(combined)) {
+      AddCharacterClassForDesugaring(combined);
+    } else {
+      ZoneList<uc16> surrogate_pair(2, zone());
+      surrogate_pair.Add(lead_surrogate, zone());
+      surrogate_pair.Add(trail_surrogate, zone());
+      RegExpAtom* atom =
+          new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
+      AddAtom(atom);
+    }
+  } else {
+    pending_surrogate_ = trail_surrogate;
+    FlushPendingSurrogate();
+  }
+}
+
+
+void RegExpBuilder::FlushPendingSurrogate() {
+  if (pending_surrogate_ != kNoPendingSurrogate) {
+    DCHECK(unicode());
+    uc32 c = pending_surrogate_;
+    pending_surrogate_ = kNoPendingSurrogate;
+    AddCharacterClassForDesugaring(c);
+  }
+}
+
+
 void RegExpBuilder::FlushCharacters() {
+  FlushPendingSurrogate();
   pending_empty_ = false;
   if (characters_ != NULL) {
     RegExpTree* atom = new (zone()) RegExpAtom(characters_->ToConstVector());
@@ -1053,31 +1249,61 @@
 
 
 void RegExpBuilder::AddCharacter(uc16 c) {
+  FlushPendingSurrogate();
   pending_empty_ = false;
-  if (characters_ == NULL) {
-    characters_ = new (zone()) ZoneList<uc16>(4, zone());
+  if (NeedsDesugaringForIgnoreCase(c)) {
+    AddCharacterClassForDesugaring(c);
+  } else {
+    if (characters_ == NULL) {
+      characters_ = new (zone()) ZoneList<uc16>(4, zone());
+    }
+    characters_->Add(c, zone());
+    LAST(ADD_CHAR);
   }
-  characters_->Add(c, zone());
-  LAST(ADD_CHAR);
 }
 
 
 void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
   if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-    ZoneList<uc16> surrogate_pair(2, zone());
-    surrogate_pair.Add(unibrow::Utf16::LeadSurrogate(c), zone());
-    surrogate_pair.Add(unibrow::Utf16::TrailSurrogate(c), zone());
-    RegExpAtom* atom = new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
-    AddAtom(atom);
+    DCHECK(unicode());
+    AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
+    AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
+  } else if (unicode() && unibrow::Utf16::IsLeadSurrogate(c)) {
+    AddLeadSurrogate(c);
+  } else if (unicode() && unibrow::Utf16::IsTrailSurrogate(c)) {
+    AddTrailSurrogate(c);
   } else {
     AddCharacter(static_cast<uc16>(c));
   }
 }
 
+void RegExpBuilder::AddEscapedUnicodeCharacter(uc32 character) {
+  // A lead or trail surrogate parsed via escape sequence will not
+  // pair up with any preceding lead or following trail surrogate.
+  FlushPendingSurrogate();
+  AddUnicodeCharacter(character);
+  FlushPendingSurrogate();
+}
 
 void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
 
 
+void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
+  if (NeedsDesugaringForUnicode(cc)) {
+    // With /u, character class needs to be desugared, so it
+    // must be a standalone term instead of being part of a RegExpText.
+    AddTerm(cc);
+  } else {
+    AddAtom(cc);
+  }
+}
+
+void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
+  AddTerm(new (zone()) RegExpCharacterClass(
+      CharacterRange::List(zone(), CharacterRange::Singleton(c)), false));
+}
+
+
 void RegExpBuilder::AddAtom(RegExpTree* term) {
   if (term->IsEmpty()) {
     AddEmpty();
@@ -1094,6 +1320,13 @@
 }
 
 
+void RegExpBuilder::AddTerm(RegExpTree* term) {
+  FlushText();
+  terms_.Add(term, zone());
+  LAST(ADD_ATOM);
+}
+
+
 void RegExpBuilder::AddAssertion(RegExpTree* assert) {
   FlushText();
   terms_.Add(assert, zone());
@@ -1121,6 +1354,47 @@
 }
 
 
+bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
+  if (!unicode()) return false;
+  switch (cc->standard_type()) {
+    case 's':        // white space
+    case 'w':        // ASCII word character
+    case 'd':        // ASCII digit
+      return false;  // These standard classes do not need desugaring.
+    default:
+      break;
+  }
+  ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+  CharacterRange::Canonicalize(ranges);
+  for (int i = ranges->length() - 1; i >= 0; i--) {
+    uc32 from = ranges->at(i).from();
+    uc32 to = ranges->at(i).to();
+    // Check for non-BMP characters.
+    if (to >= kNonBmpStart) return true;
+    // Check for lone surrogates.
+    if (from <= kTrailSurrogateEnd && to >= kLeadSurrogateStart) return true;
+  }
+  return false;
+}
+
+
+bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
+#ifdef V8_I18N_SUPPORT
+  if (unicode() && ignore_case()) {
+    USet* set = uset_open(c, c);
+    uset_closeOver(set, USET_CASE_INSENSITIVE);
+    uset_removeAllStrings(set);
+    bool result = uset_size(set) > 1;
+    uset_close(set);
+    return result;
+  }
+  // In the case where ICU is not included, we act as if the unicode flag is
+  // not set, and do not desugar.
+#endif  // V8_I18N_SUPPORT
+  return false;
+}
+
+
 RegExpTree* RegExpBuilder::ToRegExp() {
   FlushTerms();
   int num_alternatives = alternatives_.length();
@@ -1129,12 +1403,12 @@
   return new (zone()) RegExpDisjunction(alternatives_.GetList(zone()));
 }
 
-
-void RegExpBuilder::AddQuantifierToAtom(
+bool RegExpBuilder::AddQuantifierToAtom(
     int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
+  FlushPendingSurrogate();
   if (pending_empty_) {
     pending_empty_ = false;
-    return;
+    return true;
   }
   RegExpTree* atom;
   if (characters_ != NULL) {
@@ -1157,23 +1431,26 @@
   } else if (terms_.length() > 0) {
     DCHECK(last_added_ == ADD_ATOM);
     atom = terms_.RemoveLast();
+    // With /u, lookarounds are not quantifiable.
+    if (unicode() && atom->IsLookaround()) return false;
     if (atom->max_match() == 0) {
       // Guaranteed to only match an empty string.
       LAST(ADD_TERM);
       if (min == 0) {
-        return;
+        return true;
       }
       terms_.Add(atom, zone());
-      return;
+      return true;
     }
   } else {
     // Only call immediately after adding an atom or character!
     UNREACHABLE();
-    return;
+    return false;
   }
   terms_.Add(new (zone()) RegExpQuantifier(min, max, quantifier_type, atom),
              zone());
   LAST(ADD_TERM);
+  return true;
 }
 
 }  // namespace internal
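
The escape and builder changes above all funnel through the standard UTF-16 pairing rules. A minimal sketch of the arithmetic, as a standalone reimplementation rather than V8's unibrow::Utf16 helpers:

    #include <cstdint>

    // Lead surrogates occupy 0xD800-0xDBFF, trail surrogates 0xDC00-0xDFFF.
    bool IsLeadSurrogate(uint16_t c) { return (c & 0xFC00) == 0xD800; }
    bool IsTrailSurrogate(uint16_t c) { return (c & 0xFC00) == 0xDC00; }

    // Combine a lead/trail pair into a supplementary code point (>= 0x10000).
    uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
      return 0x10000u + ((uint32_t(lead) - 0xD800u) << 10) +
             (uint32_t(trail) - 0xDC00u);
    }

    // Example: CombineSurrogatePair(0xD83D, 0xDE00) == 0x1F600.

This is why ParseUnicodeEscape peeks for a following \u escape after reading a lead surrogate: with /u, the pair \uD83D\uDE00 must behave as the single code point U+1F600.
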
diff --git a/src/regexp/regexp-parser.h b/src/regexp/regexp-parser.h
index af9b765..acf783c 100644
--- a/src/regexp/regexp-parser.h
+++ b/src/regexp/regexp-parser.h
@@ -99,28 +99,43 @@
 // Accumulates RegExp atoms and assertions into lists of terms and alternatives.
 class RegExpBuilder : public ZoneObject {
  public:
-  explicit RegExpBuilder(Zone* zone);
+  RegExpBuilder(Zone* zone, bool ignore_case, bool unicode);
   void AddCharacter(uc16 character);
   void AddUnicodeCharacter(uc32 character);
+  void AddEscapedUnicodeCharacter(uc32 character);
   // "Adds" an empty expression. Does nothing except consume a
   // following quantifier.
   void AddEmpty();
+  void AddCharacterClass(RegExpCharacterClass* cc);
+  void AddCharacterClassForDesugaring(uc32 c);
   void AddAtom(RegExpTree* tree);
+  void AddTerm(RegExpTree* tree);
   void AddAssertion(RegExpTree* tree);
   void NewAlternative();  // '|'
-  void AddQuantifierToAtom(int min, int max,
+  bool AddQuantifierToAtom(int min, int max,
                            RegExpQuantifier::QuantifierType type);
   RegExpTree* ToRegExp();
 
  private:
+  static const uc16 kNoPendingSurrogate = 0;
+  void AddLeadSurrogate(uc16 lead_surrogate);
+  void AddTrailSurrogate(uc16 trail_surrogate);
+  void FlushPendingSurrogate();
   void FlushCharacters();
   void FlushText();
   void FlushTerms();
+  bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
+  bool NeedsDesugaringForIgnoreCase(uc32 c);
   Zone* zone() const { return zone_; }
+  bool ignore_case() const { return ignore_case_; }
+  bool unicode() const { return unicode_; }
 
   Zone* zone_;
   bool pending_empty_;
+  bool ignore_case_;
+  bool unicode_;
   ZoneList<uc16>* characters_;
+  uc16 pending_surrogate_;
   BufferedZoneList<RegExpTree, 2> terms_;
   BufferedZoneList<RegExpTree, 2> text_;
   BufferedZoneList<RegExpTree, 2> alternatives_;
@@ -135,12 +150,11 @@
 
 class RegExpParser BASE_EMBEDDED {
  public:
-  RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
-               bool unicode, Isolate* isolate, Zone* zone);
+  RegExpParser(FlatStringReader* in, Handle<String>* error,
+               JSRegExp::Flags flags, Isolate* isolate, Zone* zone);
 
   static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
-                          bool multiline, bool unicode,
-                          RegExpCompileData* result);
+                          JSRegExp::Flags flags, RegExpCompileData* result);
 
   RegExpTree* ParsePattern();
   RegExpTree* ParseDisjunction();
@@ -160,6 +174,7 @@
   bool ParseHexEscape(int length, uc32* value);
   bool ParseUnicodeEscape(uc32* value);
   bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
+  ZoneList<CharacterRange>* ParsePropertyClass();
 
   uc32 ParseOctalLiteral();
 
@@ -183,8 +198,11 @@
   int captures_started() { return captures_started_; }
   int position() { return next_pos_ - 1; }
   bool failed() { return failed_; }
+  bool ignore_case() const { return ignore_case_; }
+  bool multiline() const { return multiline_; }
+  bool unicode() const { return unicode_; }
 
-  static bool IsSyntaxCharacter(uc32 c);
+  static bool IsSyntaxCharacterOrSlash(uc32 c);
 
   static const int kMaxCaptures = 1 << 16;
   static const uc32 kEndMarker = (1 << 21);
@@ -203,9 +221,10 @@
     RegExpParserState(RegExpParserState* previous_state,
                       SubexpressionType group_type,
                       RegExpLookaround::Type lookaround_type,
-                      int disjunction_capture_index, Zone* zone)
+                      int disjunction_capture_index, bool ignore_case,
+                      bool unicode, Zone* zone)
         : previous_state_(previous_state),
-          builder_(new (zone) RegExpBuilder(zone)),
+          builder_(new (zone) RegExpBuilder(zone, ignore_case, unicode)),
           group_type_(group_type),
           lookaround_type_(lookaround_type),
           disjunction_capture_index_(disjunction_capture_index) {}
@@ -249,6 +268,8 @@
   bool has_more() { return has_more_; }
   bool has_next() { return next_pos_ < in()->length(); }
   uc32 Next();
+  template <bool update_position>
+  uc32 ReadNext();
   FlatStringReader* in() { return in_; }
   void ScanForCaptures();
 
@@ -258,13 +279,14 @@
   ZoneList<RegExpCapture*>* captures_;
   FlatStringReader* in_;
   uc32 current_;
+  bool ignore_case_;
+  bool multiline_;
+  bool unicode_;
   int next_pos_;
   int captures_started_;
   // The capture count is only valid after we have scanned for captures.
   int capture_count_;
   bool has_more_;
-  bool multiline_;
-  bool unicode_;
   bool simple_;
   bool contains_anchor_;
   bool is_scanned_for_captures_;
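
The new pending_surrogate_ field effectively makes RegExpBuilder a one-slot buffer: a lead surrogate is held until the next code unit either completes the pair or forces it to be flushed on its own. A toy sketch of that state machine, with illustrative standalone types rather than V8's:

    #include <cstdint>
    #include <vector>

    struct SurrogateBuffer {
      // 0 is a safe sentinel, mirroring kNoPendingSurrogate above, because
      // 0 is never a surrogate code unit.
      static const uint16_t kNone = 0;
      uint16_t pending = kNone;
      std::vector<uint32_t> out;  // emitted code points

      void Flush() {
        if (pending != kNone) {
          out.push_back(pending);  // lone surrogate, emitted as-is here
          pending = kNone;
        }
      }
      void Add(uint16_t unit) {
        if ((unit & 0xFC00) == 0xD800) {  // lead: buffer it
          Flush();
          pending = unit;
        } else if ((unit & 0xFC00) == 0xDC00 && pending != kNone) {
          // Trail completing a pair: emit the combined code point.
          out.push_back(0x10000u + ((uint32_t(pending) - 0xD800u) << 10) +
                        (uint32_t(unit) - 0xDC00u));
          pending = kNone;
        } else {  // BMP character or lone trail
          Flush();
          out.push_back(unit);
        }
      }
    };

The real builder instead desugars a flushed lone surrogate into a character class (AddCharacterClassForDesugaring); the sketch only shows the buffering shape.
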
diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc
index 286f159..952034f 100644
--- a/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -203,7 +203,7 @@
 
 
 void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   ReadPositionFromRegister(rdx, start_reg);  // Offset of start of capture
   ReadPositionFromRegister(rbx, start_reg + 1);  // Offset of end of capture
@@ -308,8 +308,10 @@
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
-    //   Isolate* isolate
+    //   Isolate* isolate, or 0 if the unicode flag is set.
 #ifdef _WIN64
+    DCHECK(rcx.is(arg_reg_1));
+    DCHECK(rdx.is(arg_reg_2));
     // Compute and set byte_offset1 (start of capture).
     __ leap(rcx, Operand(rsi, rdx, times_1, 0));
     // Set byte_offset2.
@@ -317,11 +319,9 @@
     if (read_backward) {
       __ subq(rdx, rbx);
     }
-    // Set byte_length.
-    __ movp(r8, rbx);
-    // Isolate.
-    __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
 #else  // AMD64 calling convention
+    DCHECK(rdi.is(arg_reg_1));
+    DCHECK(rsi.is(arg_reg_2));
     // Compute byte_offset2 (current position = rsi+rdi).
     __ leap(rax, Operand(rsi, rdi, times_1, 0));
     // Compute and set byte_offset1 (start of capture).
@@ -331,11 +331,19 @@
     if (read_backward) {
       __ subq(rsi, rbx);
     }
+#endif  // _WIN64
+
     // Set byte_length.
-    __ movp(rdx, rbx);
+    __ movp(arg_reg_3, rbx);
     // Isolate.
-    __ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
-#endif
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ movp(arg_reg_4, Immediate(0));
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
+    }
 
     { // NOLINT: Can't find a way to open this scope without confusing the
       // linter.
@@ -869,11 +877,14 @@
         __ testp(rdi, rdi);
         __ j(zero, &exit_label_, Label::kNear);
         // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
         if (mode_ == UC16) {
           __ addq(rdi, Immediate(2));
         } else {
           __ incq(rdi);
         }
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
       }
 
       __ jmp(&load_char_start_regexp);
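
The advance label and CheckNotInSurrogatePair call added above keep a global /u regexp from restarting a match between the two halves of a surrogate pair after a zero-length match. In C++ terms the loop is roughly the following sketch of the intent, not the generated code:

    #include <cstdint>

    // Advance past a zero-length match in a UTF-16 buffer, stepping over
    // the trail half of a surrogate pair when the unicode flag is set.
    int AdvanceAfterEmptyMatch(const uint16_t* s, int len, int pos,
                               bool unicode) {
      pos += 1;  // one code unit (the UC16 path above adds 2 bytes)
      if (unicode && pos < len &&
          (s[pos] & 0xFC00) == 0xDC00 &&      // trail at the new position
          (s[pos - 1] & 0xFC00) == 0xD800) {  // preceded by a lead
        pos += 1;  // step out of the pair
      }
      return pos;
    }
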
diff --git a/src/regexp/x64/regexp-macro-assembler-x64.h b/src/regexp/x64/regexp-macro-assembler-x64.h
index 2578047..4c37771 100644
--- a/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -38,7 +38,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/src/regexp/x87/regexp-macro-assembler-x87.cc b/src/regexp/x87/regexp-macro-assembler-x87.cc
index 01d0b24..6e62092 100644
--- a/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -187,9 +187,8 @@
   __ bind(&fallthrough);
 }
 
-
 void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
-    int start_reg, bool read_backward, Label* on_no_match) {
+    int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
@@ -296,11 +295,18 @@
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
-    //   Isolate* isolate
+    //   Isolate* isolate, or 0 if the unicode flag is set.
 
     // Set isolate.
-    __ mov(Operand(esp, 3 * kPointerSize),
-           Immediate(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+    if (unicode) {
+      __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
+    } else  // NOLINT
+#endif      // V8_I18N_SUPPORT
+    {
+      __ mov(Operand(esp, 3 * kPointerSize),
+             Immediate(ExternalReference::isolate_address(isolate())));
+    }
     // Set byte_length.
     __ mov(Operand(esp, 2 * kPointerSize), ebx);
     // Set byte_offset2.
@@ -822,13 +828,15 @@
         __ test(edi, edi);
         __ j(zero, &exit_label_, Label::kNear);
         // Advance current position after a zero-length match.
+        Label advance;
+        __ bind(&advance);
         if (mode_ == UC16) {
           __ add(edi, Immediate(2));
         } else {
           __ inc(edi);
         }
+        if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
       }
-
       __ jmp(&load_char_start_regexp);
     } else {
       __ mov(eax, Immediate(SUCCESS));
diff --git a/src/regexp/x87/regexp-macro-assembler-x87.h b/src/regexp/x87/regexp-macro-assembler-x87.h
index c955412..2f68961 100644
--- a/src/regexp/x87/regexp-macro-assembler-x87.h
+++ b/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -37,7 +37,7 @@
   virtual void CheckNotBackReference(int start_reg, bool read_backward,
                                      Label* on_no_match);
   virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
-                                               bool read_backward,
+                                               bool read_backward, bool unicode,
                                                Label* on_no_match);
   virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
   virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 2d4ee9c..e17cbb1 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -58,16 +58,18 @@
                         int* ic_with_type_info_count, int* ic_generic_count,
                         int* ic_total_count, int* type_info_percentage,
                         int* generic_percentage) {
-  Code* shared_code = shared->code();
   *ic_total_count = 0;
   *ic_generic_count = 0;
   *ic_with_type_info_count = 0;
-  Object* raw_info = shared_code->type_feedback_info();
-  if (raw_info->IsTypeFeedbackInfo()) {
-    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
-    *ic_with_type_info_count = info->ic_with_type_info_count();
-    *ic_generic_count = info->ic_generic_count();
-    *ic_total_count = info->ic_total_count();
+  if (shared->code()->kind() == Code::FUNCTION) {
+    Code* shared_code = shared->code();
+    Object* raw_info = shared_code->type_feedback_info();
+    if (raw_info->IsTypeFeedbackInfo()) {
+      TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
+      *ic_with_type_info_count = info->ic_with_type_info_count();
+      *ic_generic_count = info->ic_generic_count();
+      *ic_total_count = info->ic_total_count();
+    }
   }
 
   // Harvest vector-ics as well
@@ -136,8 +138,160 @@
   }
 }
 
+void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
+                                               int frame_count,
+                                               bool frame_optimized) {
+  SharedFunctionInfo* shared = function->shared();
+  Code* shared_code = shared->code();
+  if (shared_code->kind() != Code::FUNCTION) return;
+  if (function->IsInOptimizationQueue()) return;
 
-void RuntimeProfiler::OptimizeNow() {
+  if (FLAG_always_osr) {
+    AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
+    // Fall through and do a normal optimized compile as well.
+  } else if (!frame_optimized &&
+             (function->IsMarkedForOptimization() ||
+              function->IsMarkedForConcurrentOptimization() ||
+              function->IsOptimized())) {
+    // Attempt OSR if we are still running unoptimized code even though the
+    // function has long been marked or even already been optimized.
+    int ticks = shared_code->profiler_ticks();
+    int64_t allowance =
+        kOSRCodeSizeAllowanceBase +
+        static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
+    if (shared_code->CodeSize() > allowance &&
+        ticks < Code::ProfilerTicksField::kMax) {
+      shared_code->set_profiler_ticks(ticks + 1);
+    } else {
+      AttemptOnStackReplacement(function);
+    }
+    return;
+  }
+
+  // Only record top-level code on top of the execution stack and
+  // avoid optimizing excessively large scripts since top-level code
+  // will be executed only once.
+  const int kMaxToplevelSourceSize = 10 * 1024;
+  if (shared->is_toplevel() &&
+      (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
+    return;
+  }
+
+  // Do not record non-optimizable functions.
+  if (shared->optimization_disabled()) {
+    if (shared->deopt_count() >= FLAG_max_opt_count) {
+      // If optimization was disabled due to many deoptimizations,
+      // then check if the function is hot and try to reenable optimization.
+      int ticks = shared_code->profiler_ticks();
+      if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+        shared_code->set_profiler_ticks(0);
+        shared->TryReenableOptimization();
+      } else {
+        shared_code->set_profiler_ticks(ticks + 1);
+      }
+    }
+    return;
+  }
+  if (function->IsOptimized()) return;
+
+  int ticks = shared_code->profiler_ticks();
+
+  if (ticks >= kProfilerTicksBeforeOptimization) {
+    int typeinfo, generic, total, type_percentage, generic_percentage;
+    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+                &generic_percentage);
+    if (type_percentage >= FLAG_type_info_threshold &&
+        generic_percentage <= FLAG_generic_ic_threshold) {
+      // If this particular function hasn't had any ICs patched for enough
+      // ticks, optimize it now.
+      Optimize(function, "hot and stable");
+    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+      Optimize(function, "not much type info but very hot");
+    } else {
+      shared_code->set_profiler_ticks(ticks + 1);
+      if (FLAG_trace_opt_verbose) {
+        PrintF("[not yet optimizing ");
+        function->PrintName();
+        PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
+               type_percentage);
+      }
+    }
+  } else if (!any_ic_changed_ &&
+             shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+    // If no IC was patched since the last tick and this function is very
+    // small, optimistically optimize it now.
+    int typeinfo, generic, total, type_percentage, generic_percentage;
+    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+                &generic_percentage);
+    if (type_percentage >= FLAG_type_info_threshold &&
+        generic_percentage <= FLAG_generic_ic_threshold) {
+      Optimize(function, "small function");
+    } else {
+      shared_code->set_profiler_ticks(ticks + 1);
+    }
+  } else {
+    shared_code->set_profiler_ticks(ticks + 1);
+  }
+}
+
+void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
+                                            bool frame_optimized) {
+  if (function->IsInOptimizationQueue()) return;
+
+  SharedFunctionInfo* shared = function->shared();
+  int ticks = shared->profiler_ticks();
+
+  // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
+  // than kMaxToplevelSourceSize.
+  // TODO(rmcilroy): Consider whether we should optimize small functions when
+  // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
+
+  if (!frame_optimized && (function->IsMarkedForOptimization() ||
+                           function->IsMarkedForConcurrentOptimization() ||
+                           function->IsOptimized())) {
+    // TODO(rmcilroy): Support OSR in these cases.
+
+    return;
+  }
+
+  // Do not optimize non-optimizable functions.
+  if (shared->optimization_disabled()) {
+    if (shared->deopt_count() >= FLAG_max_opt_count) {
+      // If optimization was disabled due to many deoptimizations,
+      // then check if the function is hot and try to reenable optimization.
+      if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+        shared->set_profiler_ticks(0);
+        shared->TryReenableOptimization();
+      }
+    }
+    return;
+  }
+
+  if (function->IsOptimized()) return;
+
+  if (ticks >= kProfilerTicksBeforeOptimization) {
+    int typeinfo, generic, total, type_percentage, generic_percentage;
+    GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+                &generic_percentage);
+    if (type_percentage >= FLAG_type_info_threshold &&
+        generic_percentage <= FLAG_generic_ic_threshold) {
+      // If this particular function hasn't had any ICs patched for enough
+      // ticks, optimize it now.
+      Optimize(function, "hot and stable");
+    } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+      Optimize(function, "not much type info but very hot");
+    } else {
+      if (FLAG_trace_opt_verbose) {
+        PrintF("[not yet optimizing ");
+        function->PrintName();
+        PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
+               type_percentage);
+      }
+    }
+  }
+}
+
+void RuntimeProfiler::MarkCandidatesForOptimization() {
   HandleScope scope(isolate_);
 
   if (!isolate_->use_crankshaft()) return;
@@ -155,9 +309,6 @@
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = frame->function();
 
-    SharedFunctionInfo* shared = function->shared();
-    Code* shared_code = shared->code();
-
     List<JSFunction*> functions(4);
     frame->GetFunctions(&functions);
     for (int i = functions.length(); --i >= 0; ) {
@@ -168,94 +319,10 @@
       }
     }
 
-    if (shared_code->kind() != Code::FUNCTION) continue;
-    if (function->IsInOptimizationQueue()) continue;
-
-    if (FLAG_always_osr) {
-      AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
-      // Fall through and do a normal optimized compile as well.
-    } else if (!frame->is_optimized() &&
-        (function->IsMarkedForOptimization() ||
-         function->IsMarkedForConcurrentOptimization() ||
-         function->IsOptimized())) {
-      // Attempt OSR if we are still running unoptimized code even though the
-      // the function has long been marked or even already been optimized.
-      int ticks = shared_code->profiler_ticks();
-      int64_t allowance =
-          kOSRCodeSizeAllowanceBase +
-          static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
-      if (shared_code->CodeSize() > allowance &&
-          ticks < Code::ProfilerTicksField::kMax) {
-        shared_code->set_profiler_ticks(ticks + 1);
-      } else {
-        AttemptOnStackReplacement(function);
-      }
-      continue;
-    }
-
-    // Only record top-level code on top of the execution stack and
-    // avoid optimizing excessively large scripts since top-level code
-    // will be executed only once.
-    const int kMaxToplevelSourceSize = 10 * 1024;
-    if (shared->is_toplevel() &&
-        (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
-      continue;
-    }
-
-    // Do not record non-optimizable functions.
-    if (shared->optimization_disabled()) {
-      if (shared->deopt_count() >= FLAG_max_opt_count) {
-        // If optimization was disabled due to many deoptimizations,
-        // then check if the function is hot and try to reenable optimization.
-        int ticks = shared_code->profiler_ticks();
-        if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
-          shared_code->set_profiler_ticks(0);
-          shared->TryReenableOptimization();
-        } else {
-          shared_code->set_profiler_ticks(ticks + 1);
-        }
-      }
-      continue;
-    }
-    if (function->IsOptimized()) continue;
-
-    int ticks = shared_code->profiler_ticks();
-
-    if (ticks >= kProfilerTicksBeforeOptimization) {
-      int typeinfo, generic, total, type_percentage, generic_percentage;
-      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
-                  &generic_percentage);
-      if (type_percentage >= FLAG_type_info_threshold &&
-          generic_percentage <= FLAG_generic_ic_threshold) {
-        // If this particular function hasn't had any ICs patched for enough
-        // ticks, optimize it now.
-        Optimize(function, "hot and stable");
-      } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-        Optimize(function, "not much type info but very hot");
-      } else {
-        shared_code->set_profiler_ticks(ticks + 1);
-        if (FLAG_trace_opt_verbose) {
-          PrintF("[not yet optimizing ");
-          function->PrintName();
-          PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
-                 type_percentage);
-        }
-      }
-    } else if (!any_ic_changed_ &&
-               shared_code->instruction_size() < kMaxSizeEarlyOpt) {
-      // If no IC was patched since the last tick and this function is very
-      // small, optimistically optimize it now.
-      int typeinfo, generic, total, type_percentage, generic_percentage;
-      GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
-                  &generic_percentage);
-      if (type_percentage >= FLAG_type_info_threshold &&
-          generic_percentage <= FLAG_generic_ic_threshold) {
-        Optimize(function, "small function");
-      } else {
-        shared_code->set_profiler_ticks(ticks + 1);
-      }
+    if (FLAG_ignition) {
+      MaybeOptimizeIgnition(function, frame->is_optimized());
     } else {
-      shared_code->set_profiler_ticks(ticks + 1);
+      MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
     }
   }
   any_ic_changed_ = false;
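
The OSR throttle carried over into MaybeOptimizeFullCodegen grows linearly with profiler ticks: OSR is attempted only once the code size falls under kOSRCodeSizeAllowanceBase + ticks * kOSRCodeSizeAllowancePerTick. A small sketch of that check, with illustrative constants rather than V8's actual values:

    #include <cstdint>

    // Illustrative values; the real constants live in runtime-profiler.h.
    const int64_t kOSRCodeSizeAllowanceBase = 100 * 1024;
    const int64_t kOSRCodeSizeAllowancePerTick = 32 * 1024;

    bool ShouldAttemptOSR(int64_t code_size, int ticks) {
      int64_t allowance =
          kOSRCodeSizeAllowanceBase +
          static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
      // Large code must accumulate more ticks before OSR is attempted.
      return code_size <= allowance;
    }
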
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 0d57929..aa2f65e 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -23,13 +23,16 @@
  public:
   explicit RuntimeProfiler(Isolate* isolate);
 
-  void OptimizeNow();
+  void MarkCandidatesForOptimization();
 
   void NotifyICChanged() { any_ic_changed_ = true; }
 
   void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
 
  private:
+  void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
+                                bool frame_optimized);
+  void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
   void Optimize(JSFunction* function, const char* reason);
 
   bool CodeSizeOKForOSR(Code* shared_code);
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index 28e92cb..f651ed4 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -120,9 +120,11 @@
 RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
-  return *JSObject::PrepareElementsForSort(object, limit);
+  if (object->IsJSProxy()) return Smi::FromInt(-1);
+  return *JSObject::PrepareElementsForSort(Handle<JSObject>::cast(object),
+                                           limit);
 }
 
 
@@ -199,6 +201,15 @@
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
 
+  if (array->HasFastStringWrapperElements()) {
+    int string_length =
+        String::cast(Handle<JSValue>::cast(array)->value())->length();
+    int backing_store_length = array->elements()->length();
+    return *isolate->factory()->NewNumberFromUint(
+        Min(length,
+            static_cast<uint32_t>(Max(string_length, backing_store_length))));
+  }
+
   if (!array->elements()->IsDictionary()) {
     RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
                    array->HasFastDoubleElements());
@@ -206,8 +217,8 @@
     return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
   }
 
-  KeyAccumulator accumulator(isolate, ALL_PROPERTIES);
-  // No need to separate protoype levels since we only get numbers/element keys
+  KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
+  // No need to separate prototype levels since we only get element keys.
   for (PrototypeIterator iter(isolate, array,
                               PrototypeIterator::START_AT_RECEIVER);
        !iter.IsAtEnd(); iter.Advance()) {
@@ -480,15 +491,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  // Returning undefined means that this fast path fails and one has to resort
-  // to a slow path.
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
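
The fast-string-wrapper branch added above clamps the requested length against whichever is larger, the wrapped string's length or its elements backing store, since a String wrapper can carry both string indices and extra elements. A reduced sketch of the clamp, with illustrative names:

    #include <algorithm>
    #include <cstdint>

    uint32_t ClampWrapperLength(uint32_t requested, int string_length,
                                int backing_store_length) {
      uint32_t max_index =
          static_cast<uint32_t>(std::max(string_length, backing_store_length));
      return std::min(requested, max_index);
    }

    // E.g. new String("abc") with a 10-slot backing store and a requested
    // length of 100 clamps to 10.
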
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index ccd15e8..e27685d 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -81,8 +81,7 @@
   return isolate->heap()->home_object_symbol();
 }
 
-
-static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
+static MaybeHandle<Object> DefineClass(Isolate* isolate,
                                        Handle<Object> super_class,
                                        Handle<JSFunction> constructor,
                                        int start_position, int end_position) {
@@ -105,8 +104,7 @@
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype_parent,
           Runtime::GetObjectProperty(isolate, super_class,
-                                     isolate->factory()->prototype_string(),
-                                     SLOPPY),
+                                     isolate->factory()->prototype_string()),
           Object);
       if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
         THROW_NEW_ERROR(
@@ -138,17 +136,12 @@
   map->SetConstructor(*constructor);
   Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
 
-  Handle<String> name_string = name->IsString()
-                                   ? Handle<String>::cast(name)
-                                   : isolate->factory()->empty_string();
-  constructor->shared()->set_name(*name_string);
-
   if (!super_class->IsTheHole()) {
     // Derived classes, just like builtins, don't create implicit receivers in
     // [[construct]]. Instead they just set up new.target and call into the
     // constructor. Hence we can reuse the builtins construct stub for derived
     // classes.
-    Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStub());
+    Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStubForDerived());
     constructor->shared()->set_construct_stub(*stub);
   }
 
@@ -195,35 +188,20 @@
 
 RUNTIME_FUNCTION(Runtime_DefineClass) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
-  CONVERT_SMI_ARG_CHECKED(start_position, 3);
-  CONVERT_SMI_ARG_CHECKED(end_position, 4);
+  DCHECK(args.length() == 4);
+  CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
+  CONVERT_SMI_ARG_CHECKED(start_position, 2);
+  CONVERT_SMI_ARG_CHECKED(end_position, 3);
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, DefineClass(isolate, name, super_class, constructor,
+      isolate, result, DefineClass(isolate, super_class, constructor,
                                    start_position, end_position));
   return *result;
 }
 
 
-RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
-
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-                              JSObject::DefinePropertyOrElementIgnoreAttributes(
-                                  object, name, function, DONT_ENUM));
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -244,12 +222,10 @@
   return *constructor;
 }
 
-
 static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
                                          Handle<Object> receiver,
                                          Handle<JSObject> home_object,
-                                         Handle<Name> name,
-                                         LanguageMode language_mode) {
+                                         Handle<Name> name) {
   if (home_object->IsAccessCheckNeeded() &&
       !isolate->MayAccess(handle(isolate->context()), home_object)) {
     isolate->ReportFailedAccessCheck(home_object);
@@ -259,22 +235,19 @@
   PrototypeIterator iter(isolate, home_object);
   Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
   if (!proto->IsJSReceiver()) {
-    return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+    return Object::ReadAbsentProperty(isolate, proto, name);
   }
 
   LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
   Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
-                             Object::GetProperty(&it, language_mode), Object);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
   return result;
 }
 
-
 static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
                                                 Handle<Object> receiver,
                                                 Handle<JSObject> home_object,
-                                                uint32_t index,
-                                                LanguageMode language_mode) {
+                                                uint32_t index) {
   if (home_object->IsAccessCheckNeeded() &&
       !isolate->MayAccess(handle(isolate->context()), home_object)) {
     isolate->ReportFailedAccessCheck(home_object);
@@ -285,50 +258,44 @@
   Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
   if (!proto->IsJSReceiver()) {
     Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
-    return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+    return Object::ReadAbsentProperty(isolate, proto, name);
   }
 
   LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
   Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
-                             Object::GetProperty(&it, language_mode), Object);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
   return result;
 }
 
 
-// TODO(conradw): It would be more efficient to have a separate runtime function
-// for strong mode.
 RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
-  CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+      isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
   return *result;
 }
 
 
 RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
-  CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
 
   uint32_t index = 0;
   Handle<Object> result;
 
   if (key->ToArrayIndex(&index)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
-                                              index, language_mode));
+        isolate, result,
+        LoadElementFromSuper(isolate, receiver, home_object, index));
     return *result;
   }
 
@@ -338,13 +305,12 @@
   // TODO(verwaest): Unify using LookupIterator.
   if (name->AsArrayIndex(&index)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
-                                              index, language_mode));
+        isolate, result,
+        LoadElementFromSuper(isolate, receiver, home_object, index));
     return *result;
   }
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+      isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
   return *result;
 }
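
LoadFromSuper and LoadElementFromSuper above start the lookup at the home object's prototype while keeping the original receiver. A reduced sketch of that shape, with illustrative stand-in types rather than V8's LookupIterator:

    #include <map>
    #include <string>

    struct Obj {
      std::map<std::string, int> props;  // data properties only, for brevity
      Obj* proto = nullptr;
    };

    // super.name: walk the chain from home_object->proto; the receiver is
    // kept only for accessor calls, which this sketch elides.
    bool LoadFromSuper(Obj* /*receiver*/, Obj* home_object,
                       const std::string& name, int* out) {
      for (Obj* o = home_object->proto; o != nullptr; o = o->proto) {
        auto it = o->props.find(name);
        if (it != o->props.end()) {
          *out = it->second;
          return true;
        }
      }
      return false;  // absent property
    }
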
 
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 15a3a14..263c4f9 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -52,8 +52,7 @@
   if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
 
   Handle<Code> code;
-  Handle<Code> unoptimized(function->shared()->code());
-  if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
+  if (Compiler::GetOptimizedCode(function, mode).ToHandle(&code)) {
     // Optimization succeeded, return optimized code.
     function->ReplaceCode(*code);
   } else {
@@ -72,6 +71,8 @@
 
   DCHECK(function->code()->kind() == Code::FUNCTION ||
          function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+         (function->code()->is_interpreter_entry_trampoline() &&
+          function->shared()->HasBytecodeArray()) ||
          function->IsInOptimizationQueue());
   return function->code();
 }
@@ -135,6 +136,8 @@
       static_cast<Deoptimizer::BailoutType>(type_arg);
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   DCHECK(AllowHeapAllocation::IsAllowed());
+  TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+  TRACE_EVENT0("v8", "V8.DeoptimizeCode");
 
   Handle<JSFunction> function = deoptimizer->function();
   Handle<Code> optimized_code = deoptimizer->compiled_code();
@@ -247,7 +250,6 @@
                                     function->shared()->ast_node_count() > 512)
                                        ? Compiler::CONCURRENT
                                        : Compiler::NOT_CONCURRENT;
-  Handle<Code> result = Handle<Code>::null();
 
   OptimizedCompileJob* job = NULL;
   if (mode == Compiler::CONCURRENT) {
@@ -268,22 +270,24 @@
     job = dispatcher->FindReadyOSRCandidate(function, ast_id);
   }
 
+  MaybeHandle<Code> maybe_result;
   if (job != NULL) {
     if (FLAG_trace_osr) {
       PrintF("[OSR - Found ready: ");
       function->PrintName();
       PrintF(" at AST id %d]\n", ast_id.ToInt());
     }
-    result = Compiler::GetConcurrentlyOptimizedCode(job);
+    maybe_result = Compiler::GetConcurrentlyOptimizedCode(job);
   } else if (IsSuitableForOnStackReplacement(isolate, function)) {
     if (FLAG_trace_osr) {
       PrintF("[OSR - Compiling: ");
       function->PrintName();
       PrintF(" at AST id %d]\n", ast_id.ToInt());
     }
-    MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode(
-        function, caller_code, mode, ast_id,
+    maybe_result = Compiler::GetOptimizedCode(
+        function, mode, ast_id,
         (mode == Compiler::NOT_CONCURRENT) ? frame : nullptr);
+    Handle<Code> result;
     if (maybe_result.ToHandle(&result) &&
         result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
       // Optimization is queued.  Return to check later.
@@ -295,7 +299,9 @@
   BackEdgeTable::Revert(isolate, *caller_code);
 
   // Check whether we ended up with usable optimized code.
-  if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+  Handle<Code> result;
+  if (maybe_result.ToHandle(&result) &&
+      result->kind() == Code::OPTIMIZED_FUNCTION) {
     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(result->deoptimization_data());
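
The refactor above replaces the Handle<Code>::null() sentinel with MaybeHandle, which makes the emptiness check explicit at each use site. The idiom in a reduced sketch; V8's real Handle and MaybeHandle are GC-aware, and these stand-ins only show the control flow:

    #include <cstdio>

    template <typename T>
    struct MaybeHandle {
      T* location;  // nullptr means "no result", e.g. failed compilation
      bool ToHandle(T** out) const {
        if (location == nullptr) return false;
        *out = location;
        return true;
      }
    };

    int main() {
      int code = 42;
      MaybeHandle<int> maybe_result{&code};
      int* result;
      if (maybe_result.ToHandle(&result)) {
        std::printf("usable code: %d\n", *result);  // checked access only
      }
      return 0;
    }
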
 
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 80791de..c29ea9a 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -22,8 +22,7 @@
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it(isolate);
   isolate->debug()->Break(args, it.frame());
-  isolate->debug()->SetAfterBreakTarget(it.frame());
-  return isolate->heap()->undefined_value();
+  return isolate->debug()->SetAfterBreakTarget(it.frame());
 }
 
 
@@ -82,7 +81,7 @@
           return it->isolate()->factory()->undefined_value();
         }
         MaybeHandle<Object> maybe_result =
-            JSObject::GetPropertyWithAccessor(it, SLOPPY);
+            JSObject::GetPropertyWithAccessor(it);
         Handle<Object> result;
         if (!maybe_result.ToHandle(&result)) {
           result = handle(it->isolate()->pending_exception(), it->isolate());
@@ -334,10 +333,14 @@
   details->set(
       2, isolate->heap()->ToBoolean(it.state() == LookupIterator::INTERCEPTOR));
   if (has_js_accessors) {
-    AccessorPair* accessors = AccessorPair::cast(*maybe_pair);
+    Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_pair);
     details->set(3, isolate->heap()->ToBoolean(has_caught));
-    details->set(4, accessors->GetComponent(ACCESSOR_GETTER));
-    details->set(5, accessors->GetComponent(ACCESSOR_SETTER));
+    Handle<Object> getter =
+        AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
+    Handle<Object> setter =
+        AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
+    details->set(4, *getter);
+    details->set(5, *setter);
   }
 
   return *isolate->factory()->NewJSArrayWithElements(details);
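GetComponent becomes a static method taking a Handle<AccessorPair> because the lookup can allocate, and an allocation may move objects out from under raw pointers. A toy model of why re-reading through a handle is the safe shape (invented types, not V8's machinery):

  #include <iostream>

  // Toy handle model: one extra indirection, so a moving collector could
  // update the pointer behind every live handle.
  template <typename T>
  struct Handle {
    T** slot;
    T* get() const { return *slot; }
  };

  struct Object { int tag; };
  struct AccessorPair { Object* getter; Object* setter; };
  enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };

  // Static, handle-taking variant of GetComponent: even if something in here
  // allocated and moved the pair, re-reading through the handle stays valid.
  Object* GetComponent(Handle<AccessorPair> pair, AccessorComponent which) {
    AccessorPair* p = pair.get();  // re-read after any potential allocation
    return which == ACCESSOR_GETTER ? p->getter : p->setter;
  }

  int main() {
    Object g{1}, s{2};
    AccessorPair pair_storage{&g, &s};
    AccessorPair* indirect = &pair_storage;
    Handle<AccessorPair> pair{&indirect};
    std::cout << GetComponent(pair, ACCESSOR_GETTER)->tag << "\n";  // prints 1
    return 0;
  }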
@@ -526,7 +529,8 @@
   bool constructor = frame_inspector.IsConstructor();
 
   // Get scope info and read from it for local variable information.
-  Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(frame_inspector.GetFunction());
   RUNTIME_ASSERT(function->shared()->IsSubjectToDebugging());
   Handle<SharedFunctionInfo> shared(function->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -550,13 +554,13 @@
     // Use the value from the stack.
     if (scope_info->LocalIsSynthetic(i)) continue;
     locals->set(local * 2, scope_info->LocalName(i));
-    locals->set(local * 2 + 1, frame_inspector.GetExpression(i));
+    locals->set(local * 2 + 1, *(frame_inspector.GetExpression(i)));
     local++;
   }
   if (local < local_count) {
     // Get the context containing declarations.
     Handle<Context> context(
-        Context::cast(frame_inspector.GetContext())->declaration_context());
+        Handle<Context>::cast(frame_inspector.GetContext())->closure_context());
     for (; i < scope_info->LocalCount(); ++i) {
       if (scope_info->LocalIsSynthetic(i)) continue;
       Handle<String> name(scope_info->LocalName(i));
@@ -635,7 +639,7 @@
   details->set(kFrameDetailsFrameIdIndex, *frame_id);
 
   // Add the function (same as in function frame).
-  details->set(kFrameDetailsFunctionIndex, frame_inspector.GetFunction());
+  details->set(kFrameDetailsFunctionIndex, *(frame_inspector.GetFunction()));
 
   // Add the arguments count.
   details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
@@ -685,7 +689,7 @@
     // Parameter value.
     if (i < frame_inspector.GetParametersCount()) {
       // Get the value from the stack.
-      details->set(details_index++, frame_inspector.GetParameter(i));
+      details->set(details_index++, *(frame_inspector.GetParameter(i)));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
@@ -704,25 +708,7 @@
   // Add the receiver (same as in function frame).
   Handle<Object> receiver(it.frame()->receiver(), isolate);
   DCHECK(!function->shared()->IsBuiltin());
-  if (!receiver->IsJSObject() && is_sloppy(shared->language_mode())) {
-    // If the receiver is not a JSObject and the function is not a builtin or
-    // strict-mode we have hit an optimization where a value object is not
-    // converted into a wrapped JS objects. To hide this optimization from the
-    // debugger, we wrap the receiver by creating correct wrapper object based
-    // on the function's native context.
-    // See ECMA-262 6.0, 9.2.1.2, 6 b iii.
-    if (receiver->IsUndefined()) {
-      receiver = handle(function->global_proxy());
-    } else {
-      Context* context = function->context();
-      Handle<Context> native_context(Context::cast(context->native_context()));
-      if (!Object::ToObject(isolate, receiver, native_context)
-               .ToHandle(&receiver)) {
-        // This only happens if the receiver is forcibly set in %_CallFunction.
-        return heap->undefined_value();
-      }
-    }
-  }
+  DCHECK_IMPLIES(is_sloppy(shared->language_mode()), receiver->IsJSReceiver());
   details->set(kFrameDetailsReceiverIndex, *receiver);
 
   DCHECK_EQ(details_size, details_index);
@@ -1329,14 +1315,14 @@
   return *result;
 }
 
-
-static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate, Object* object,
+static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
+                                               JSObject* object,
                                                Object* proto) {
   PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
   while (true) {
     iter.AdvanceIgnoringProxies();
     if (iter.IsAtEnd()) return false;
-    if (iter.IsAtEnd(proto)) return true;
+    if (iter.GetCurrent() == proto) return true;
   }
 }
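With proxies skipped, the walk above is a plain loop up the prototype chain, comparing each object against the target. A standalone sketch with toy objects (proxies and access checks deliberately out of scope, as the IgnoringProxies name says):

  #include <iostream>

  struct Obj {
    Obj* prototype = nullptr;  // nullptr plays the role of null, ending the chain
  };

  // Start at the receiver, advance first, then compare: the receiver itself
  // is never matched, only objects strictly above it.
  bool HasInPrototypeChain(Obj* object, Obj* proto) {
    for (Obj* current = object->prototype; current != nullptr;
         current = current->prototype) {
      if (current == proto) return true;
    }
    return false;
  }

  int main() {
    Obj base, middle, leaf;
    middle.prototype = &base;
    leaf.prototype = &middle;
    std::cout << HasInPrototypeChain(&leaf, &base) << "\n";  // 1
    std::cout << HasInPrototypeChain(&base, &leaf) << "\n";  // 0
    return 0;
  }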
 
@@ -1443,7 +1429,7 @@
   // TODO(1543): Come up with a solution for clients to handle potential errors
   // thrown by an intermediate proxy.
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
-                                     Object::GetPrototype(isolate, obj));
+                                     JSReceiver::GetPrototype(isolate, obj));
   return *prototype;
 }
 
diff --git a/src/runtime/runtime-forin.cc b/src/runtime/runtime-forin.cc
index ff6804c..c44945c 100644
--- a/src/runtime/runtime-forin.cc
+++ b/src/runtime/runtime-forin.cc
@@ -5,11 +5,90 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+
+// Returns either a FixedArray or, if the given {receiver} has an enum cache
+// that contains all enumerable properties of the {receiver} and its prototypes
+// have none, the map of the {receiver}. This is used to speed up the check for
+// deletions during a for-in.
+MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
+  Isolate* const isolate = receiver->GetIsolate();
+  // Test if we have an enum cache for {receiver}.
+  if (!receiver->IsSimpleEnum()) {
+    Handle<FixedArray> keys;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, keys,
+        JSReceiver::GetKeys(receiver, INCLUDE_PROTOS, ENUMERABLE_STRINGS),
+        HeapObject);
+    // Test again, since the cache may have been built by the GetKeys() call
+    // above.
+    if (!receiver->IsSimpleEnum()) return keys;
+  }
+  return handle(receiver->map(), isolate);
+}
+
+
+MaybeHandle<Object> Filter(Handle<JSReceiver> receiver, Handle<Object> key) {
+  Isolate* const isolate = receiver->GetIsolate();
+  // TODO(turbofan): Fast case for array indices.
+  Handle<Name> name;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+                             Object);
+  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
+  MAYBE_RETURN_NULL(result);
+  if (result.FromJust()) return name;
+  return isolate->factory()->undefined_value();
+}
+
+}  // namespace
+
+
+RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+  Handle<HeapObject> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Enumerate(receiver));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION_RETURN_TRIPLE(Runtime_ForInPrepare) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+  Handle<Object> cache_type;
+  if (!Enumerate(receiver).ToHandle(&cache_type)) {
+    return MakeTriple(isolate->heap()->exception(), nullptr, nullptr);
+  }
+  Handle<FixedArray> cache_array;
+  int cache_length;
+  if (cache_type->IsMap()) {
+    Handle<Map> cache_map = Handle<Map>::cast(cache_type);
+    Handle<DescriptorArray> descriptors(cache_map->instance_descriptors(),
+                                        isolate);
+    cache_length = cache_map->EnumLength();
+    if (cache_length && descriptors->HasEnumCache()) {
+      cache_array = handle(descriptors->GetEnumCache(), isolate);
+    } else {
+      cache_array = isolate->factory()->empty_fixed_array();
+      cache_length = 0;
+    }
+  } else {
+    cache_array = Handle<FixedArray>::cast(cache_type);
+    cache_length = cache_array->length();
+    cache_type = handle(Smi::FromInt(1), isolate);
+  }
+  return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
+}
+
+
 RUNTIME_FUNCTION(Runtime_ForInDone) {
   SealHandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -26,15 +105,9 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-  // TODO(turbofan): Fast case for array indices.
-  Handle<Name> name;
-  if (!Object::ToName(isolate, key).ToHandle(&name)) {
-    return isolate->heap()->exception();
-  }
-  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
-  if (!result.IsJust()) return isolate->heap()->exception();
-  if (result.FromJust()) return *name;
-  return isolate->heap()->undefined_value();
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
+  return *result;
 }
 
 
@@ -46,20 +119,13 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
   CONVERT_SMI_ARG_CHECKED(index, 3);
   Handle<Object> key = handle(cache_array->get(index), isolate);
-  // Don't need filtering if expected map still matches that of the receiver,
-  // and neither for proxies.
-  if (receiver->map() == *cache_type || *cache_type == Smi::FromInt(0)) {
+  // Don't need filtering if expected map still matches that of the receiver.
+  if (receiver->map() == *cache_type) {
     return *key;
   }
-  // TODO(turbofan): Fast case for array indices.
-  Handle<Name> name;
-  if (!Object::ToName(isolate, key).ToHandle(&name)) {
-    return isolate->heap()->exception();
-  }
-  Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
-  if (!result.IsJust()) return isolate->heap()->exception();
-  if (result.FromJust()) return *name;
-  return isolate->heap()->undefined_value();
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
+  return *result;
 }
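ForInPrepare above returns a (cache_type, cache_array, cache_length) triple where cache_type is either the receiver's map (fast path: the enum cache is valid, so keys need no filtering while the map stays the same) or Smi(1) (slow path: every key goes through Filter). A toy consumer-side sketch of that protocol, with invented Receiver/Map types:

  #include <cstddef>
  #include <iostream>
  #include <string>
  #include <vector>

  struct Map { int id; };
  struct Receiver {
    Map* map;
    std::vector<std::string> keys;
    bool Has(const std::string& k) const {
      for (const std::string& key : keys)
        if (key == k) return true;
      return false;
    }
  };

  int main() {
    Map map{7};
    Receiver r{&map, {"a", "b", "c"}};
    const Map* cache_type = r.map;  // ForInPrepare fast path: cache the map
    std::vector<std::string> cache_array = r.keys;
    for (std::size_t i = 0; i < cache_array.size(); i++) {
      const std::string& key = cache_array[i];
      // ForInNext: while the map is unchanged, the key needs no filtering;
      // otherwise check it still exists (it may have been deleted mid-loop).
      if (r.map != cache_type && !r.Has(key)) continue;
      std::cout << key << "\n";
    }
    return 0;
  }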
 
 
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index befd337..d424a9e 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -55,11 +55,14 @@
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
 
-  if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
-  Handle<Object> script(Handle<JSFunction>::cast(function)->shared()->script(),
-                        isolate);
-  if (!script->IsScript()) return isolate->heap()->undefined_value();
-  return *Script::GetWrapper(Handle<Script>::cast(script));
+  if (function->IsJSFunction()) {
+    Handle<Object> script(
+        Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+    if (script->IsScript()) {
+      return *Script::GetWrapper(Handle<Script>::cast(script));
+    }
+  }
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -67,8 +70,10 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
-  if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
-  return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+  if (function->IsJSFunction()) {
+    return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+  }
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -86,13 +91,9 @@
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(Code, code, 0);
+  CONVERT_ARG_CHECKED(AbstractCode, abstract_code, 0);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
-
-  RUNTIME_ASSERT(0 <= offset && offset < code->Size());
-
-  Address pc = code->address() + offset;
-  return Smi::FromInt(code->SourcePosition(pc));
+  return Smi::FromInt(abstract_code->SourcePosition(offset));
 }
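Both SourcePosition call sites in this file now pass a plain byte offset instead of materializing an Address from code->address() + offset, which is what lets AbstractCode serve machine code and bytecode alike. A sketch of an offset-keyed position lookup, assuming a table of (code_offset, source_position) pairs sorted by offset:

  #include <iostream>
  #include <vector>

  struct PositionEntry { int code_offset; int source_position; };

  // Return the source position of the last entry at or before `offset`.
  int SourcePosition(const std::vector<PositionEntry>& table, int offset) {
    int position = 0;  // fall back to the start of the function
    for (const PositionEntry& e : table) {
      if (e.code_offset > offset) break;  // table is sorted by code_offset
      position = e.source_position;
    }
    return position;
  }

  int main() {
    std::vector<PositionEntry> table = {{0, 10}, {12, 42}, {30, 57}};
    std::cout << SourcePosition(table, 15) << "\n";  // 42
    return 0;
  }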
 
 
@@ -166,6 +167,9 @@
   // Set the code, scope info, formal parameter count, and the length
   // of the target shared function info.
   target_shared->ReplaceCode(source_shared->code());
+  if (source_shared->HasBytecodeArray()) {
+    target_shared->set_function_data(source_shared->bytecode_array());
+  }
   target_shared->set_scope_info(source_shared->scope_info());
   target_shared->set_length(source_shared->length());
   target_shared->set_feedback_vector(source_shared->feedback_vector());
@@ -232,7 +236,6 @@
   return isolate->heap()->ToBoolean(object->IsConstructor());
 }
 
-
 RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
   SealHandleScope shs(isolate);
   RUNTIME_ASSERT(args.length() == 1);
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index 926cd3c..dab0621 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -22,11 +22,8 @@
   RUNTIME_ASSERT(function->shared()->is_generator());
 
   Handle<JSGeneratorObject> generator;
-  if (frame->IsConstructor()) {
-    generator = handle(JSGeneratorObject::cast(frame->receiver()));
-  } else {
-    generator = isolate->factory()->NewJSGeneratorObject(function);
-  }
+  DCHECK(!frame->IsConstructor());
+  generator = isolate->factory()->NewJSGeneratorObject(function);
   generator->set_function(*function);
   generator->set_context(Context::cast(frame->context()));
   generator->set_receiver(frame->receiver());
@@ -39,7 +36,7 @@
 
 RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 1 || args.length() == 2);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
 
   JavaScriptFrameIterator stack_iterator(isolate);
@@ -58,18 +55,6 @@
   DCHECK_GE(operands_count, 1 + args.length());
   operands_count -= 1 + args.length();
 
-  // Second argument indicates that we need to patch the handler table because
-  // a delegating yield introduced a try-catch statement at expression level,
-  // hence the operand count was off when we statically computed it.
-  // TODO(mstarzinger): This special case disappears with do-expressions.
-  if (args.length() == 2) {
-    CONVERT_SMI_ARG_CHECKED(handler_index, 1);
-    Handle<Code> code(frame->unchecked_code());
-    Handle<HandlerTable> table(HandlerTable::cast(code->handler_table()));
-    int handler_depth = operands_count - TryBlockConstant::kElementCount;
-    table->SetRangeDepth(handler_index, handler_depth);
-  }
-
   if (operands_count == 0) {
     // Although it's semantically harmless to call this function with an
     // operands_count of zero, it is also unnecessary.
@@ -90,9 +75,9 @@
 // called if the suspended activation had operands on the stack, stack handlers
 // needing rewinding, or if the resume should throw an exception.  The fast path
 // is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
-// inlined into GeneratorNext and GeneratorThrow.  EmitGeneratorResumeResume is
-// called in any case, as it needs to reconstruct the stack frame and make space
-// for arguments and operands.
+// inlined into GeneratorNext, GeneratorReturn, and GeneratorThrow.
+// EmitGeneratorResume is called in any case, as it needs to reconstruct the
+// stack frame and make space for arguments and operands.
 RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 3);
@@ -128,7 +113,10 @@
   JSGeneratorObject::ResumeMode resume_mode =
       static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
   switch (resume_mode) {
+    // Note: it may look as if NEXT and RETURN are the same, but RETURN
+    // receives special treatment in the generator code (to which we return
+    // here).
     case JSGeneratorObject::NEXT:
+    case JSGeneratorObject::RETURN:
       return value;
     case JSGeneratorObject::THROW:
       return isolate->Throw(value);
@@ -180,6 +168,16 @@
 }
 
 
+// Returns the input of the generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetInput) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return generator->input();
+}
+
+
 // Returns generator continuation as a PC offset, or the magic -1 or 0 values.
 RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
   HandleScope scope(isolate);
@@ -198,26 +196,33 @@
   if (generator->is_suspended()) {
     Handle<Code> code(generator->function()->code(), isolate);
     int offset = generator->continuation();
-
-    RUNTIME_ASSERT(0 <= offset && offset < code->Size());
-    Address pc = code->address() + offset;
-
-    return Smi::FromInt(code->SourcePosition(pc));
+    RUNTIME_ASSERT(0 <= offset && offset < code->instruction_size());
+    return Smi::FromInt(code->SourcePosition(offset));
   }
 
   return isolate->heap()->undefined_value();
 }
 
 
+// Optimization for the following three functions is disabled in
+// js/generator.js and compiler/ast-graph-builder.cc.
+
+
 RUNTIME_FUNCTION(Runtime_GeneratorNext) {
-  UNREACHABLE();  // Optimization disabled in SetUpGenerators().
-  return NULL;
+  UNREACHABLE();
+  return nullptr;
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
+  UNREACHABLE();
+  return nullptr;
 }
 
 
 RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
-  UNREACHABLE();  // Optimization disabled in SetUpGenerators().
-  return NULL;
+  UNREACHABLE();
+  return nullptr;
 }
 }  // namespace internal
 }  // namespace v8
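Note how the new RETURN case deliberately falls together with NEXT in Runtime_ResumeJSGeneratorObject; the generator code that this runtime call returns into is what treats RETURN specially. A toy dispatch showing the shape of that switch (not V8's machinery):

  #include <iostream>
  #include <stdexcept>

  enum ResumeMode { NEXT, RETURN, THROW };

  int Resume(ResumeMode mode, int value) {
    switch (mode) {
      // NEXT and RETURN look identical here: both hand the value back to
      // the generator; RETURN is then handled by the generator itself.
      case NEXT:
      case RETURN:
        return value;
      case THROW:
        throw std::runtime_error("thrown into generator");
    }
    return 0;  // unreachable
  }

  int main() {
    std::cout << Resume(NEXT, 1) << " " << Resume(RETURN, 2) << "\n";
    try {
      Resume(THROW, 3);
    } catch (const std::exception& e) {
      std::cout << e.what() << "\n";
    }
    return 0;
  }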
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index e1f0c8e..e57f8d3 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -586,8 +586,9 @@
 
   // TODO(mnita): check Normalizer2 (not available in ICU 46)
   UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString input(false, u_value, string_value.length());
   icu::UnicodeString result;
-  icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0, result,
+  icu::Normalizer::normalize(input, normalizationForms[form_id], 0, result,
                              status);
   if (U_FAILURE(status)) {
     return isolate->heap()->undefined_value();
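The i18n fix wraps the raw UChar buffer in a read-only aliasing icu::UnicodeString (the leading false means "not NUL-terminated", with the length passed explicitly) rather than relying on an implicit conversion. A standalone sketch of the same ICU call, assuming ICU development headers and the icuuc/icui18n libraries are available:

  // Compile with: -licuuc -licui18n. icu::Normalizer::normalize is the
  // older normalization API this code targets.
  #include <unicode/normlzr.h>
  #include <unicode/unistr.h>
  #include <iostream>
  #include <string>

  int main() {
    const UChar raw[] = {0x0065, 0x0301};  // 'e' + combining acute accent
    UErrorCode status = U_ZERO_ERROR;
    // Read-only alias: false = not NUL-terminated, length given explicitly.
    icu::UnicodeString input(false, raw, 2);
    icu::UnicodeString result;
    icu::Normalizer::normalize(input, UNORM_NFC, 0, result, status);
    if (U_FAILURE(status)) return 1;
    std::string utf8;
    result.toUTF8String(utf8);
    std::cout << utf8 << "\n";  // e-acute as a single precomposed code point
    return 0;
  }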
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index ee66464..0ca2e84 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -395,7 +395,7 @@
       List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
       it.frame()->Summarize(&frames);
       FrameSummary& summary = frames.last();
-      int pos = summary.code()->SourcePosition(summary.pc());
+      int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
       *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
       return true;
     }
@@ -448,6 +448,14 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_ThrowDerivedConstructorReturnedNonObject) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kDerivedConstructorReturn));
+}
+
+
 // ES6 section 7.3.17 CreateListFromArrayLike (obj)
 RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
   HandleScope scope(isolate);
@@ -469,5 +477,17 @@
   return isolate->heap()->undefined_value();
 }
 
+
+RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  std::stringstream stats_stream;
+  isolate->counters()->runtime_call_stats()->Print(stats_stream);
+  Handle<String> result =
+      isolate->factory()->NewStringFromAsciiChecked(stats_stream.str().c_str());
+  isolate->counters()->runtime_call_stats()->Reset();
+  return *result;
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-interpreter.cc b/src/runtime/runtime-interpreter.cc
index d061a49..7150a8b 100644
--- a/src/runtime/runtime-interpreter.cc
+++ b/src/runtime/runtime-interpreter.cc
@@ -4,115 +4,18 @@
 
 #include "src/runtime/runtime-utils.h"
 
+#include <iomanip>
+
 #include "src/arguments.h"
+#include "src/frames-inl.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
 #include "src/isolate-inl.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 
-
-RUNTIME_FUNCTION(Runtime_InterpreterEquals) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::Equals(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterNotEquals) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::Equals(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(!result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterLessThan) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::LessThan(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterGreaterThan) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::GreaterThan(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterLessThanOrEqual) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::LessThanOrEqual(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterGreaterThanOrEqual) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::GreaterThanOrEqual(x, y);
-  if (result.IsJust()) {
-    return isolate->heap()->ToBoolean(result.FromJust());
-  } else {
-    return isolate->heap()->exception();
-  }
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_CHECKED(Object, x, 0);
-  CONVERT_ARG_CHECKED(Object, y, 1);
-  return isolate->heap()->ToBoolean(x->StrictEquals(y));
-}
-
-
-RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_CHECKED(Object, x, 0);
-  CONVERT_ARG_CHECKED(Object, y, 1);
-  return isolate->heap()->ToBoolean(!x->StrictEquals(y));
-}
-
-
 RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
@@ -130,7 +33,7 @@
 
 
 RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
-  SealHandleScope shs(isolate);
+  HandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
   return Object::cast(*Object::TypeOf(isolate, x));
@@ -147,55 +50,121 @@
       shared, context, static_cast<PretenureFlag>(pretenured_flag));
 }
 
+namespace {
 
-RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
-  HandleScope scope(isolate);
+void PrintRegisters(std::ostream& os, bool is_input,
+                    Handle<BytecodeArray> bytecode_array, int bytecode_offset,
+                    Handle<Object> accumulator) {
+  static const int kRegFieldWidth = static_cast<int>(strlen("accumulator"));
+  static const char* kInputColourCode = "\033[0;36m";
+  static const char* kOutputColourCode = "\033[0;35m";
+  static const char* kNormalColourCode = "\033[0;m";
+  const char* kArrowDirection = is_input ? " -> " : " <- ";
+  if (FLAG_log_colour) {
+    os << (is_input ? kInputColourCode : kOutputColourCode);
+  }
+
+  // Print accumulator.
+  os << "      [ accumulator" << kArrowDirection;
+  accumulator->ShortPrint();
+  os << " ]" << std::endl;
+
+  // Find the location of the register file.
+  JavaScriptFrameIterator frame_iterator(bytecode_array->GetIsolate());
+  JavaScriptFrame* frame = frame_iterator.frame();
+  Address register_file =
+      frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
+
+  // Print the registers.
+  interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+  bytecode_iterator.set_current_offset(
+      bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag);
+  interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+  int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
+  for (int operand_index = 0; operand_index < operand_count; operand_index++) {
+    interpreter::OperandType operand_type =
+        interpreter::Bytecodes::GetOperandType(bytecode, operand_index);
+    bool should_print =
+        is_input
+            ? interpreter::Bytecodes::IsRegisterInputOperandType(operand_type)
+            : interpreter::Bytecodes::IsRegisterOutputOperandType(operand_type);
+    if (should_print) {
+      interpreter::Register first_reg =
+          bytecode_iterator.GetRegisterOperand(operand_index);
+      int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
+      for (int reg_index = first_reg.index();
+           reg_index < first_reg.index() + range; reg_index++) {
+        Address reg_location = register_file - reg_index * kPointerSize;
+        Object* reg_object = Memory::Object_at(reg_location);
+        os << "      [ " << std::setw(kRegFieldWidth)
+           << interpreter::Register(reg_index).ToString(
+                  bytecode_array->parameter_count())
+           << kArrowDirection;
+        reg_object->ShortPrint(os);
+        os << " ]" << std::endl;
+      }
+    }
+  }
+  if (FLAG_log_colour) {
+    os << kNormalColourCode;
+  }
+}
+
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+  OFStream os(stdout);
+
+  // Print bytecode.
+  const uint8_t* bytecode_address =
+      reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
+  Vector<char> buf = Vector<char>::New(50);
+  SNPrintF(buf, "%p", bytecode_address);
+  os << " -> " << buf.start() << " (" << bytecode_offset << ") : ";
+  interpreter::Bytecodes::Decode(os, bytecode_address,
+                                 bytecode_array->parameter_count());
+  os << std::endl;
+
+  // Print all input registers and accumulator.
+  PrintRegisters(os, true, bytecode_array, bytecode_offset, accumulator);
+
+  os << std::flush;
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+  CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+  OFStream os(stdout);
+
+  // Print all output registers and accumulator.
+  PrintRegisters(os, false, bytecode_array, bytecode_offset, accumulator);
+  os << std::flush;
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterClearPendingMessage) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(0, args.length());
+  Object* message = isolate->thread_local_top()->pending_message_obj_;
+  isolate->clear_pending_message();
+  return message;
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
+  SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-
-  Object* property_names = Runtime_GetPropertyNamesFast(
-      1, Handle<Object>::cast(receiver).location(), isolate);
-  if (isolate->has_pending_exception()) {
-    return property_names;
-  }
-
-  Handle<Object> cache_type(property_names, isolate);
-  Handle<FixedArray> cache_array;
-  int cache_length;
-
-  Handle<Map> receiver_map = handle(receiver->map(), isolate);
-  if (cache_type->IsMap()) {
-    Handle<Map> cache_type_map =
-        handle(Handle<Map>::cast(cache_type)->map(), isolate);
-    DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
-    int enum_length = cache_type_map->EnumLength();
-    DescriptorArray* descriptors = receiver_map->instance_descriptors();
-    if (enum_length > 0 && descriptors->HasEnumCache()) {
-      cache_array = handle(descriptors->GetEnumCache(), isolate);
-      cache_length = cache_array->length();
-    } else {
-      cache_array = isolate->factory()->empty_fixed_array();
-      cache_length = 0;
-    }
-  } else {
-    cache_array = Handle<FixedArray>::cast(cache_type);
-    cache_length = cache_array->length();
-
-    STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
-    if (receiver_map->instance_type() == JS_PROXY_TYPE) {
-      // Zero indicates proxy
-      cache_type = Handle<Object>(Smi::FromInt(0), isolate);
-    } else {
-      // One entails slow check
-      cache_type = Handle<Object>(Smi::FromInt(1), isolate);
-    }
-  }
-
-  Handle<FixedArray> result = isolate->factory()->NewFixedArray(3);
-  result->set(0, *cache_type);
-  result->set(1, *cache_array);
-  result->set(2, Smi::FromInt(cache_length));
-  return *result;
+  CONVERT_ARG_HANDLE_CHECKED(Object, message, 0);
+  isolate->thread_local_top()->pending_message_obj_ = *message;
+  return isolate->heap()->undefined_value();
 }
 
 }  // namespace internal
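The new tracing helpers print input registers in cyan before a bytecode executes and output registers in magenta afterwards, resetting the colour at the end. A minimal sketch of just that colouring convention (toy values; the escape codes match the ones used under --log-colour):

  #include <iostream>

  static const char* kInput = "\033[0;36m";   // cyan: inputs, arrow " -> "
  static const char* kOutput = "\033[0;35m";  // magenta: outputs, arrow " <- "
  static const char* kNormal = "\033[0;m";    // reset

  void PrintRegister(bool is_input, const char* name, int value) {
    const char* arrow = is_input ? " -> " : " <- ";
    std::cout << (is_input ? kInput : kOutput)
              << "      [ " << name << arrow << value << " ]"
              << kNormal << "\n";
  }

  int main() {
    PrintRegister(true, "accumulator", 42);  // input snapshot before dispatch
    PrintRegister(false, "r0", 7);           // output snapshot after dispatch
    return 0;
  }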
diff --git a/src/runtime/runtime-json.cc b/src/runtime/runtime-json.cc
index 45f8183..07232d5 100644
--- a/src/runtime/runtime-json.cc
+++ b/src/runtime/runtime-json.cc
@@ -7,9 +7,9 @@
 #include "src/arguments.h"
 #include "src/char-predicates-inl.h"
 #include "src/isolate-inl.h"
+#include "src/json-parser.h"
 #include "src/json-stringifier.h"
 #include "src/objects-inl.h"
-#include "src/parsing/json-parser.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index b0e41dc..e730957 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -400,59 +400,5 @@
   return *result;
 }
 
-
-RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
-  HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_SMI_ARG_CHECKED(store_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 3);
-  CONVERT_SMI_ARG_CHECKED(literal_index, 4);
-
-  Object* raw_literal_cell = literals->literal(literal_index);
-  JSArray* boilerplate = NULL;
-  if (raw_literal_cell->IsAllocationSite()) {
-    AllocationSite* site = AllocationSite::cast(raw_literal_cell);
-    boilerplate = JSArray::cast(site->transition_info());
-  } else {
-    boilerplate = JSArray::cast(raw_literal_cell);
-  }
-  Handle<JSArray> boilerplate_object(boilerplate);
-  ElementsKind elements_kind = object->GetElementsKind();
-  DCHECK(IsFastElementsKind(elements_kind));
-  // Smis should never trigger transitions.
-  DCHECK(!value->IsSmi());
-
-  if (value->IsNumber()) {
-    DCHECK(IsFastSmiElementsKind(elements_kind));
-    ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
-                                         ? FAST_HOLEY_DOUBLE_ELEMENTS
-                                         : FAST_DOUBLE_ELEMENTS;
-    if (IsMoreGeneralElementsKindTransition(
-            boilerplate_object->GetElementsKind(), transitioned_kind)) {
-      JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
-    }
-    JSObject::TransitionElementsKind(object, transitioned_kind);
-    DCHECK(IsFastDoubleElementsKind(object->GetElementsKind()));
-    FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
-    HeapNumber* number = HeapNumber::cast(*value);
-    double_array->set(store_index, number->Number());
-  } else {
-    if (!IsFastObjectElementsKind(elements_kind)) {
-      ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
-                                           ? FAST_HOLEY_ELEMENTS
-                                           : FAST_ELEMENTS;
-      JSObject::TransitionElementsKind(object, transitioned_kind);
-      if (IsMoreGeneralElementsKindTransition(
-              boilerplate_object->GetElementsKind(), transitioned_kind)) {
-        JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
-      }
-    }
-    FixedArray* object_array = FixedArray::cast(object->elements());
-    object_array->set(store_index, *value);
-  }
-  return *object;
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-maths.cc b/src/runtime/runtime-maths.cc
index 427d2b8..9c4fde1 100644
--- a/src/runtime/runtime-maths.cc
+++ b/src/runtime/runtime-maths.cc
@@ -14,13 +14,13 @@
 namespace v8 {
 namespace internal {
 
-#define RUNTIME_UNARY_MATH(Name, name)                       \
-  RUNTIME_FUNCTION(Runtime_Math##Name) {                     \
-    HandleScope scope(isolate);                              \
-    DCHECK(args.length() == 1);                              \
-    isolate->counters()->math_##name()->Increment();         \
-    CONVERT_DOUBLE_ARG_CHECKED(x, 0);                        \
-    return *isolate->factory()->NewHeapNumber(std::name(x)); \
+#define RUNTIME_UNARY_MATH(Name, name)                         \
+  RUNTIME_FUNCTION(Runtime_Math##Name) {                       \
+    HandleScope scope(isolate);                                \
+    DCHECK(args.length() == 1);                                \
+    isolate->counters()->math_##name##_runtime()->Increment(); \
+    CONVERT_DOUBLE_ARG_CHECKED(x, 0);                          \
+    return *isolate->factory()->NewHeapNumber(std::name(x));   \
   }
 
 RUNTIME_UNARY_MATH(Acos, acos)
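The counter rename lands inside the macro via ## token pasting, so every instantiation (Acos, Asin, and the rest) picks up the _runtime suffix from a single edit. A self-contained sketch of the same trick with toy counters:

  #include <cmath>
  #include <iostream>

  struct Counter {
    int count = 0;
    void Increment() { ++count; }
  };
  Counter math_acos_runtime, math_asin_runtime;

  // math_##name##_runtime pastes the argument into the counter's identifier;
  // Math##Name pastes it into the function's name.
  #define UNARY_MATH(Name, name)           \
    double Math##Name(double x) {          \
      math_##name##_runtime.Increment();   \
      return std::name(x);                 \
    }

  UNARY_MATH(Acos, acos)
  UNARY_MATH(Asin, asin)

  int main() {
    std::cout << MathAcos(1.0) << "\n";            // 0
    std::cout << math_acos_runtime.count << "\n";  // 1
    return 0;
  }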
@@ -81,8 +81,7 @@
 RUNTIME_FUNCTION(Runtime_MathAtan2) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
-  isolate->counters()->math_atan2()->Increment();
-
+  isolate->counters()->math_atan2_runtime()->Increment();
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   double result;
@@ -104,7 +103,7 @@
 RUNTIME_FUNCTION(Runtime_MathExpRT) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  isolate->counters()->math_exp()->Increment();
+  isolate->counters()->math_exp_runtime()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   lazily_initialize_fast_exp(isolate);
@@ -115,7 +114,7 @@
 RUNTIME_FUNCTION(Runtime_MathClz32) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  isolate->counters()->math_clz32()->Increment();
+  isolate->counters()->math_clz32_runtime()->Increment();
 
   CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
   return *isolate->factory()->NewNumberFromUint(
@@ -126,7 +125,7 @@
 RUNTIME_FUNCTION(Runtime_MathFloor) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  isolate->counters()->math_floor()->Increment();
+  isolate->counters()->math_floor_runtime()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return *isolate->factory()->NewNumber(Floor(x));
@@ -138,7 +137,7 @@
 RUNTIME_FUNCTION(Runtime_MathPow) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
-  isolate->counters()->math_pow()->Increment();
+  isolate->counters()->math_pow_runtime()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
 
@@ -161,7 +160,7 @@
 RUNTIME_FUNCTION(Runtime_MathPowRT) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
-  isolate->counters()->math_pow()->Increment();
+  isolate->counters()->math_pow_runtime()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -179,7 +178,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
-  isolate->counters()->math_round()->Increment();
+  isolate->counters()->math_round_runtime()->Increment();
 
   if (!input->IsHeapNumber()) {
     DCHECK(input->IsSmi());
@@ -221,7 +220,7 @@
 RUNTIME_FUNCTION(Runtime_MathSqrt) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  isolate->counters()->math_sqrt()->Increment();
+  isolate->counters()->math_sqrt_runtime()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   lazily_initialize_fast_sqrt(isolate);
@@ -239,16 +238,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_IsMinusZero) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(Object, obj, 0);
-  if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
-  HeapNumber* number = HeapNumber::cast(obj);
-  return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
-}
-
-
 RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 75ddb7b..45a4992 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -15,11 +15,9 @@
 namespace v8 {
 namespace internal {
 
-
 MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
                                                Handle<Object> object,
-                                               Handle<Object> key,
-                                               LanguageMode language_mode) {
+                                               Handle<Object> key) {
   if (object->IsUndefined() || object->IsNull()) {
     THROW_NEW_ERROR(
         isolate,
@@ -32,14 +30,12 @@
       LookupIterator::PropertyOrElement(isolate, object, key, &success);
   if (!success) return MaybeHandle<Object>();
 
-  return Object::GetProperty(&it, language_mode);
+  return Object::GetProperty(&it);
 }
 
-
 static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
                                                   Handle<Object> receiver_obj,
-                                                  Handle<Object> key_obj,
-                                                  LanguageMode language_mode) {
+                                                  Handle<Object> key_obj) {
   // Fast cases for getting named properties of the receiver JSObject
   // itself.
   //
@@ -113,8 +109,7 @@
   }
 
   // Fall back to GetObjectProperty.
-  return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj,
-                                    language_mode);
+  return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj);
 }
 
 
@@ -158,10 +153,10 @@
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
   Handle<Object> prototype;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
-                                     Object::GetPrototype(isolate, obj));
+                                     JSReceiver::GetPrototype(isolate, obj));
   return *prototype;
 }
 
@@ -230,8 +225,10 @@
   if (is_accessor_pair) {
     Handle<AccessorPair> accessors =
         Handle<AccessorPair>::cast(it.GetAccessors());
-    Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
-    Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
+    Handle<Object> getter =
+        AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
+    Handle<Object> setter =
+        AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
     elms->set(GETTER_INDEX, *getter);
     elms->set(SETTER_INDEX, *setter);
   } else {
@@ -266,31 +263,6 @@
 }
 
 
-// ES6 19.1.2.6
-RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, raw_name, 1);
-  // 1. Let obj be ? ToObject(O).
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
-                                     Execution::ToObject(isolate, object));
-  // 2. Let key be ? ToPropertyKey(P).
-  Handle<Name> key;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
-                                     Object::ToName(isolate, raw_name));
-
-  // 3. Let desc be ? obj.[[GetOwnProperty]](key).
-  PropertyDescriptor desc;
-  Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
-      isolate, Handle<JSReceiver>::cast(object), key, &desc);
-  MAYBE_RETURN(found, isolate->heap()->exception());
-  // 4. Return FromPropertyDescriptor(desc).
-  if (!found.FromJust()) return isolate->heap()->undefined_value();
-  return *desc.ToObject(isolate);
-}
-
-
 RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -409,23 +381,7 @@
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Runtime::GetObjectProperty(isolate, object, key, SLOPPY));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetPropertyStrong) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Runtime::GetObjectProperty(isolate, object, key, STRONG));
+      isolate, result, Runtime::GetObjectProperty(isolate, object, key));
   return *result;
 }
 
@@ -440,23 +396,7 @@
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      KeyedGetObjectProperty(isolate, receiver_obj, key_obj, SLOPPY));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_KeyedGetPropertyStrong) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
-
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      KeyedGetObjectProperty(isolate, receiver_obj, key_obj, STRONG));
+      isolate, result, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
   return *result;
 }
 
@@ -563,10 +503,8 @@
 Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
                        Handle<Object> key, LanguageMode language_mode) {
   Handle<JSReceiver> receiver;
-  if (!JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
-  }
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
   Maybe<bool> result =
       Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode);
   MAYBE_RETURN(result, isolate->heap()->exception());
@@ -603,11 +541,10 @@
   // Handle hidden prototypes.  If there's a hidden prototype above this thing
   // then we have to check it for properties, because they are supposed to
   // look like they are on this object.
-  PrototypeIterator iter(isolate, object);
-  if (!iter.IsAtEnd() &&
-      PrototypeIterator::GetCurrent<HeapObject>(iter)
-          ->map()
-          ->is_hidden_prototype()) {
+  if (object->map()->has_hidden_prototype()) {
+    PrototypeIterator iter(isolate, object);
+    DCHECK(!iter.IsAtEnd());
+
     // TODO(verwaest): The recursion is not necessary for keys that are array
     // indices. Removing this.
     // Casting to JSObject is fine because JSProxies are never used as
@@ -652,7 +589,7 @@
     }
     Map* map = js_obj->map();
     if (!key_is_array_index && !map->has_named_interceptor() &&
-        !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+        !map->has_hidden_prototype()) {
       return isolate->heap()->false_value();
     }
     // Slow case.
@@ -716,32 +653,6 @@
 }
 
 
-// Returns either a FixedArray or, if the given object has an enum cache that
-// contains all enumerable properties of the object and its prototypes have
-// none, the map of the object. This is used to speed up the check for
-// deletions during a for-in.
-RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-
-  CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
-
-  if (raw_object->IsSimpleEnum()) return raw_object->map();
-
-  HandleScope scope(isolate);
-  Handle<JSReceiver> object(raw_object);
-  Handle<FixedArray> content;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, content, JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS,
-                                            ENUMERABLE_STRINGS));
-
-  // Test again, since cache may have been built by preceding call.
-  if (object->IsSimpleEnum()) return object->map();
-
-  return *content;
-}
-
-
 RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -751,8 +662,8 @@
 
   Handle<FixedArray> keys;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, keys, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, filter,
-                                         CONVERT_TO_STRING));
+      isolate, keys,
+      JSReceiver::GetKeys(object, OWN_ONLY, filter, CONVERT_TO_STRING));
 
   return *isolate->factory()->NewJSArrayWithElements(keys);
 }
@@ -943,6 +854,30 @@
   return *result;
 }
 
+RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 5);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+  CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+
+  if (FLAG_harmony_function_name && set_function_name) {
+    DCHECK(value->IsJSFunction());
+    JSFunction::SetName(Handle<JSFunction>::cast(value), name,
+                        isolate->factory()->empty_string());
+  }
+
+  LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
+                                                        LookupIterator::OWN);
+  // Cannot fail since this should only be called when
+  // creating an object literal.
+  CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
+                                                    Object::DONT_THROW)
+            .IsJust());
+  return *object;
+}
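The CHECK above encodes a contract rather than handling a failure: a freshly allocated literal object is extensible and has no interceptors or proxies, so defining an own data property cannot be rejected. A toy model of that reasoning (invented JSObject, not V8's object model):

  #include <cassert>
  #include <iostream>
  #include <map>
  #include <string>

  struct JSObject {
    std::map<std::string, int> own;
    bool extensible = true;  // object literals start out extensible
    bool DefineOwnProperty(const std::string& name, int value) {
      if (!extensible && own.count(name) == 0) return false;
      own[name] = value;
      return true;
    }
  };

  int main() {
    JSObject literal;  // just created: extensible, no exotic behaviour
    bool ok = literal.DefineOwnProperty("x", 1);
    assert(ok);  // mirrors the CHECK(...IsJust()) in the runtime function
    std::cout << literal.own["x"] << "\n";
    return 0;
  }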
 
 // Return property without being observable by accessors or interceptors.
 RUNTIME_FUNCTION(Runtime_GetDataProperty) {
@@ -972,34 +907,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_SetValueOf) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_CHECKED(Object, obj, 0);
-  CONVERT_ARG_CHECKED(Object, value, 1);
-  if (!obj->IsJSValue()) return value;
-  JSValue::cast(obj)->set_value(value);
-  return value;
-}
-
-
-RUNTIME_FUNCTION(Runtime_JSValueGetValue) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSValue, obj, 0);
-  return JSValue::cast(obj)->value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectEquals) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_CHECKED(Object, obj1, 0);
-  CONVERT_ARG_CHECKED(Object, obj2, 1);
-  return isolate->heap()->ToBoolean(obj1 == obj2);
-}
-
-
 RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
@@ -1034,6 +941,11 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
 
+  if (FLAG_harmony_function_name &&
+      String::cast(getter->shared()->name())->length() == 0) {
+    JSFunction::SetName(getter, name, isolate->factory()->get_string());
+  }
+
   RETURN_FAILURE_ON_EXCEPTION(
       isolate,
       JSObject::DefineAccessor(object, name, getter,
@@ -1050,6 +962,11 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
   CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
 
+  if (FLAG_harmony_function_name &&
+      String::cast(setter->shared()->name())->length() == 0) {
+    JSFunction::SetName(setter, name, isolate->factory()->set_string());
+  }
+
   RETURN_FAILURE_ON_EXCEPTION(
       isolate,
       JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
@@ -1063,11 +980,9 @@
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   Handle<JSReceiver> receiver;
-  if (JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
-    return *receiver;
-  }
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+                                     Object::ToObject(isolate, object));
+  return *receiver;
 }
 
 
@@ -1158,25 +1073,21 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Equals) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  Maybe<bool> result = Object::Equals(x, y);
-  if (!result.IsJust()) return isolate->heap()->exception();
-  // TODO(bmeurer): Change this at some point to return true/false instead.
-  return Smi::FromInt(result.FromJust() ? EQUAL : NOT_EQUAL);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StrictEquals) {
+RUNTIME_FUNCTION(Runtime_SameValue) {
   SealHandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(Object, x, 0);
   CONVERT_ARG_CHECKED(Object, y, 1);
-  // TODO(bmeurer): Change this at some point to return true/false instead.
-  return Smi::FromInt(x->StrictEquals(y) ? EQUAL : NOT_EQUAL);
+  return isolate->heap()->ToBoolean(x->SameValue(y));
+}
+
+
+RUNTIME_FUNCTION(Runtime_SameValueZero) {
+  SealHandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(Object, x, 0);
+  CONVERT_ARG_CHECKED(Object, y, 1);
+  return isolate->heap()->ToBoolean(x->SameValueZero(y));
 }
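The two new predicates differ only on signed zero: SameValue(+0, -0) is false while SameValueZero(+0, -0) is true, and both, unlike strict equality, treat NaN as equal to itself. A number-only sketch following ES6 7.2.9/7.2.10 (the runtime versions cover all value types, not just doubles):

  #include <cmath>
  #include <iostream>

  bool SameValueZero(double x, double y) {
    if (std::isnan(x) && std::isnan(y)) return true;  // NaN equals NaN here
    return x == y;                                    // +0 == -0 stays true
  }

  bool SameValue(double x, double y) {
    if (x == 0 && y == 0) {
      return std::signbit(x) == std::signbit(y);      // distinguish +0 / -0
    }
    return SameValueZero(x, y);
  }

  int main() {
    std::cout << SameValue(0.0, -0.0) << "\n";      // 0
    std::cout << SameValueZero(0.0, -0.0) << "\n";  // 1
    std::cout << SameValue(NAN, NAN) << "\n";       // 1
    return 0;
  }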
 
 
@@ -1206,32 +1117,6 @@
 }
 
 
-// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
-// GreaterThan, etc. which return true or false.
-RUNTIME_FUNCTION(Runtime_Compare_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
-  Maybe<ComparisonResult> result = Object::Compare(x, y, Strength::STRONG);
-  if (result.IsJust()) {
-    switch (result.FromJust()) {
-      case ComparisonResult::kLessThan:
-        return Smi::FromInt(LESS);
-      case ComparisonResult::kEqual:
-        return Smi::FromInt(EQUAL);
-      case ComparisonResult::kGreaterThan:
-        return Smi::FromInt(GREATER);
-      case ComparisonResult::kUndefined:
-        return *ncr;
-    }
-    UNREACHABLE();
-  }
-  return isolate->heap()->exception();
-}
-
-
 RUNTIME_FUNCTION(Runtime_InstanceOf) {
   // ECMA-262, section 11.8.6, page 54.
   HandleScope shs(isolate);
@@ -1268,7 +1153,9 @@
         NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
   }
   // Return whether or not {prototype} is in the prototype chain of {object}.
-  Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+  Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+  Maybe<bool> result =
+      JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
   MAYBE_RETURN(result, isolate->heap()->exception());
   return isolate->heap()->ToBoolean(result.FromJust());
 }
@@ -1277,9 +1164,10 @@
 RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
-  Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+  Maybe<bool> result =
+      JSReceiver::HasInPrototypeChain(isolate, object, prototype);
   MAYBE_RETURN(result, isolate->heap()->exception());
   return isolate->heap()->ToBoolean(result.FromJust());
 }
@@ -1291,7 +1179,11 @@
   DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
-  return *isolate->factory()->NewJSIteratorResult(value, done);
+  Handle<JSObject> result =
+      isolate->factory()->NewJSObjectFromMap(isolate->iterator_result_map());
+  result->InObjectPropertyAtPut(JSIteratorResult::kValueIndex, *value);
+  result->InObjectPropertyAtPut(JSIteratorResult::kDoneIndex, *done);
+  return *result;
 }
 
 
diff --git a/src/runtime/runtime-operators.cc b/src/runtime/runtime-operators.cc
index b5e92af..e55ab7c 100644
--- a/src/runtime/runtime-operators.cc
+++ b/src/runtime/runtime-operators.cc
@@ -21,18 +21,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Multiply_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::Multiply(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_Divide) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -45,18 +33,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Divide_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::Divide(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_Modulus) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -69,18 +45,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Modulus_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::Modulus(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_Add) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -93,18 +57,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Add_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::Add(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_Subtract) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -117,18 +69,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_Subtract_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::Subtract(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_ShiftLeft) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -141,18 +81,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ShiftLeft_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ShiftLeft(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_ShiftRight) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -165,18 +93,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ShiftRight_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::ShiftRight(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -189,19 +105,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ShiftRightLogical_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Object::ShiftRightLogical(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -214,18 +117,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_BitwiseAnd_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::BitwiseAnd(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_BitwiseOr) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -238,18 +129,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_BitwiseOr_Strong) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::BitwiseOr(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_BitwiseXor) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -261,16 +140,80 @@
   return *result;
 }
 
-
-RUNTIME_FUNCTION(Runtime_BitwiseXor_Strong) {
+RUNTIME_FUNCTION(Runtime_Equal) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, Object::BitwiseXor(isolate, lhs, rhs, Strength::STRONG));
-  return *result;
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::Equals(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_NotEqual) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::Equals(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(!result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_StrictEqual) {
+  SealHandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(Object, x, 0);
+  CONVERT_ARG_CHECKED(Object, y, 1);
+  return isolate->heap()->ToBoolean(x->StrictEquals(y));
+}
+
+RUNTIME_FUNCTION(Runtime_StrictNotEqual) {
+  SealHandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(Object, x, 0);
+  CONVERT_ARG_CHECKED(Object, y, 1);
+  return isolate->heap()->ToBoolean(!x->StrictEquals(y));
+}
+
+RUNTIME_FUNCTION(Runtime_LessThan) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::LessThan(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_GreaterThan) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::GreaterThan(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_LessThanOrEqual) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::LessThanOrEqual(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+  Maybe<bool> result = Object::GreaterThanOrEqual(x, y);
+  if (!result.IsJust()) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(result.FromJust());
 }
 
 }  // namespace internal
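
The eight comparison intrinsics added above share one shape: run the (potentially
throwing) comparison, return the heap's exception sentinel if the Maybe is Nothing,
and otherwise box the bool. Only StrictEqual/StrictNotEqual get away with a
SealHandleScope, since strict equality neither allocates nor throws. A minimal
standalone sketch of that control flow, using stand-in types rather than V8's real
Object and Maybe:

    #include <cstdio>
    #include <optional>

    // Stand-ins for V8's tagged results and heap sentinels (hypothetical).
    struct Obj { const char* tag; };
    Obj kException{"exception"}, kTrue{"true"}, kFalse{"false"};

    Obj* ToBoolean(bool b) { return b ? &kTrue : &kFalse; }

    // Like Object::Equals: Nothing (nullopt) means an exception is pending.
    std::optional<bool> Equals(int x, int y, bool throws) {
      if (throws) return std::nullopt;
      return x == y;
    }

    // The Runtime_Equal pattern: propagate the pending exception, else box.
    Obj* Runtime_Equal(int x, int y, bool throws) {
      std::optional<bool> result = Equals(x, y, throws);
      if (!result.has_value()) return &kException;
      return ToBoolean(result.value());
    }

    int main() {
      std::printf("%s\n", Runtime_Equal(1, 1, false)->tag);  // true
      std::printf("%s\n", Runtime_Equal(1, 2, false)->tag);  // false
      std::printf("%s\n", Runtime_Equal(1, 2, true)->tag);   // exception
    }
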
diff --git a/src/runtime/runtime-proxy.cc b/src/runtime/runtime-proxy.cc
index 3a521c6..7764d25 100644
--- a/src/runtime/runtime-proxy.cc
+++ b/src/runtime/runtime-proxy.cc
@@ -58,9 +58,8 @@
   ElementsAccessor* accessor = arg_array->GetElementsAccessor();
   {
     DisallowHeapAllocation no_gc;
-    FixedArrayBase* elements = arg_array->elements();
     for (int i = 0; i < arguments_length; i++) {
-      accessor->Set(elements, i, args[i + 1]);
+      accessor->Set(arg_array, i, args[i + 1]);
     }
   }
   // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
@@ -119,9 +118,8 @@
   ElementsAccessor* accessor = arg_array->GetElementsAccessor();
   {
     DisallowHeapAllocation no_gc;
-    FixedArrayBase* elements = arg_array->elements();
     for (int i = 0; i < arguments_length; i++) {
-      accessor->Set(elements, i, args[i + 1]);
+      accessor->Set(arg_array, i, args[i + 1]);
     }
   }
   // 8. Let newObj be ? Call(trap, handler, «target, argArray, newTarget »).
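
Both proxy hunks make the same change: ElementsAccessor::Set now takes the holding
JSObject rather than a raw FixedArrayBase, letting the accessor fetch the current
backing store itself instead of trusting a pointer hoisted before the loop. A toy
rendering of the signature change (stand-in types, not the real elements API):

    #include <cstdio>
    #include <vector>

    // Stand-ins: a holder owns its backing store, as JSObject owns elements().
    struct BackingStore { std::vector<int> slots; };
    struct Holder { BackingStore store; };

    // Old shape: the caller extracted the backing store and passed it in.
    void SetOld(BackingStore* elements, int index, int value) {
      elements->slots[index] = value;
    }

    // New shape: pass the holder; the accessor reads holder->store itself,
    // so it always writes through the holder's *current* backing store.
    void SetNew(Holder* holder, int index, int value) {
      holder->store.slots[index] = value;
    }

    int main() {
      Holder h{BackingStore{{0, 0, 0}}};
      SetNew(&h, 1, 42);
      std::printf("%d\n", h.store.slots[1]);
    }
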
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index 138b4dc..df86aa8 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -492,7 +492,7 @@
     }
   }
 
-  RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+  RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
   if (global_cache.HasException()) return isolate->heap()->exception();
 
   int32_t* current_match = global_cache.FetchNext();
@@ -568,7 +568,7 @@
     }
   }
 
-  RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+  RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
   if (global_cache.HasException()) return isolate->heap()->exception();
 
   int32_t* current_match = global_cache.FetchNext();
@@ -876,7 +876,7 @@
     }
   }
 
-  RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+  RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
   if (global_cache.HasException()) return isolate->heap()->exception();
 
   // Ensured in Runtime_RegExpExecMultiple.
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index 094f1a1..a8f3a74 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -65,21 +65,27 @@
       // Check whether we can reconfigure the existing property into a
       // function.
       PropertyDetails old_details = it.property_details();
-      // TODO(verwaest): ACCESSOR_CONSTANT invalidly includes
-      // ExecutableAccessInfo,
-      // which are actually data properties, not accessor properties.
       if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
-          old_details.type() == ACCESSOR_CONSTANT) {
+          (it.state() == LookupIterator::ACCESSOR &&
+           it.GetAccessors()->IsAccessorPair())) {
         return ThrowRedeclarationError(isolate, name);
       }
       // If the existing property is not configurable, keep its attributes.
       attr = old_attributes;
     }
+
+    // If the current state is ACCESSOR, this could mean it's an AccessorInfo
+    // type property. We are not allowed to call into such setters during
+    // global function declaration, since that would break e.g. onload:
+    // 'function onload() {}' would invalidly register that function as the
+    // onload callback. To avoid this, we first delete the property before
+    // re-adding it as a regular data property below.
+    if (it.state() == LookupIterator::ACCESSOR) it.Delete();
   }
 
   // Define or redefine own property.
-  RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                                           global, name, value, attr));
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
 
   return isolate->heap()->undefined_value();
 }
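
The behavioral point of this hunk: a global function declaration must never run a
native AccessorInfo setter, so the property is deleted first and then defined as
plain data. A self-contained illustration of why the ordering matters (toy map in
place of the global object; all names hypothetical):

    #include <cstdio>
    #include <functional>
    #include <map>
    #include <string>

    // Toy global object: a slot may carry a native setter, standing in for
    // an AccessorInfo property such as the real window.onload hook.
    struct Slot {
      std::string value;
      std::function<void(const std::string&)> setter;
    };

    std::map<std::string, Slot> global;

    // A plain store runs the accessor's setter -- exactly what a global
    // 'function onload() {}' declaration must avoid.
    void SetProperty(const std::string& name, const std::string& v) {
      Slot& slot = global[name];
      if (slot.setter) slot.setter(v);
      slot.value = v;
    }

    // Function declaration: delete the accessor, then re-add the name as a
    // regular data property, so the native setter never sees the function.
    void DeclareGlobalFunction(const std::string& name, const std::string& v) {
      global.erase(name);
      global[name] = Slot{v, nullptr};
    }

    int main() {
      global["onload"] = Slot{"", [](const std::string& v) {
        std::printf("native hook ran: %s\n", v.c_str());
      }};
      SetProperty("onload", "handler A");                       // hook fires
      DeclareGlobalFunction("onload", "function onload() {}");  // hook bypassed
      std::printf("stored: %s\n", global["onload"].value.c_str());
    }
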
@@ -196,8 +202,8 @@
     }
   }
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                                           global, name, value, attr));
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
 
   return *value;
 }
@@ -414,10 +420,8 @@
 namespace {
 
 // Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
+// into C++ code. Collect these in a newly allocated array of handles.
 base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
-                                                           int prefix_argc,
                                                            int* total_argc) {
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it(isolate);
@@ -442,14 +446,14 @@
     iter++;
     argument_count--;
 
-    *total_argc = prefix_argc + argument_count;
+    *total_argc = argument_count;
     base::SmartArrayPointer<Handle<Object>> param_data(
         NewArray<Handle<Object>>(*total_argc));
     bool should_deoptimize = false;
     for (int i = 0; i < argument_count; i++) {
       should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
       Handle<Object> value = iter->GetValue();
-      param_data[prefix_argc + i] = value;
+      param_data[i] = value;
       iter++;
     }
 
@@ -463,12 +467,12 @@
     frame = it.frame();
     int args_count = frame->ComputeParametersCount();
 
-    *total_argc = prefix_argc + args_count;
+    *total_argc = args_count;
     base::SmartArrayPointer<Handle<Object>> param_data(
         NewArray<Handle<Object>>(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
-      param_data[prefix_argc + i] = val;
+      param_data[i] = val;
     }
     return param_data;
   }
@@ -564,46 +568,6 @@
 }
 
 
-template <typename T>
-Handle<JSObject> NewStrictArguments(Isolate* isolate, Handle<JSFunction> callee,
-                                    T parameters, int argument_count) {
-  Handle<JSObject> result =
-      isolate->factory()->NewArgumentsObject(callee, argument_count);
-
-  if (argument_count > 0) {
-    Handle<FixedArray> array =
-        isolate->factory()->NewUninitializedFixedArray(argument_count);
-    DisallowHeapAllocation no_gc;
-    WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
-    for (int i = 0; i < argument_count; i++) {
-      array->set(i, parameters[i], mode);
-    }
-    result->set_elements(*array);
-  }
-  return result;
-}
-
-
-template <typename T>
-Handle<JSObject> NewRestArguments(Isolate* isolate, Handle<JSFunction> callee,
-                                  T parameters, int argument_count,
-                                  int start_index) {
-  int num_elements = std::max(0, argument_count - start_index);
-  Handle<JSObject> result = isolate->factory()->NewJSArray(
-      FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
-      DONT_INITIALIZE_ARRAY_ELEMENTS);
-  {
-    DisallowHeapAllocation no_gc;
-    FixedArray* elements = FixedArray::cast(result->elements());
-    WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-    for (int i = 0; i < num_elements; i++) {
-      elements->set(i, parameters[i + start_index], mode);
-    }
-  }
-  return result;
-}
-
-
 class HandleArguments BASE_EMBEDDED {
  public:
   explicit HandleArguments(Handle<Object>* array) : array_(array) {}
@@ -634,39 +598,60 @@
   // inlined; we use the slow but accurate {GetCallerArguments}.
   int argument_count = 0;
   base::SmartArrayPointer<Handle<Object>> arguments =
-      GetCallerArguments(isolate, 0, &argument_count);
+      GetCallerArguments(isolate, &argument_count);
   HandleArguments argument_getter(arguments.get());
   return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
 }
 
 
-RUNTIME_FUNCTION(Runtime_NewStrictArguments_Generic) {
+RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
   // This generic runtime function can also be used when the caller has been
   // inlined; we use the slow but accurate {GetCallerArguments}.
   int argument_count = 0;
   base::SmartArrayPointer<Handle<Object>> arguments =
-      GetCallerArguments(isolate, 0, &argument_count);
-  HandleArguments argument_getter(arguments.get());
-  return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
+      GetCallerArguments(isolate, &argument_count);
+  Handle<JSObject> result =
+      isolate->factory()->NewArgumentsObject(callee, argument_count);
+  if (argument_count) {
+    Handle<FixedArray> array =
+        isolate->factory()->NewUninitializedFixedArray(argument_count);
+    DisallowHeapAllocation no_gc;
+    WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
+    for (int i = 0; i < argument_count; i++) {
+      array->set(i, *arguments[i], mode);
+    }
+    result->set_elements(*array);
+  }
+  return *result;
 }
 
 
-RUNTIME_FUNCTION(Runtime_NewRestArguments_Generic) {
+RUNTIME_FUNCTION(Runtime_NewRestParameter) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
-  CONVERT_SMI_ARG_CHECKED(start_index, 1);
+  int start_index = callee->shared()->internal_formal_parameter_count();
   // This generic runtime function can also be used when the caller has been
   // inlined; we use the slow but accurate {GetCallerArguments}.
   int argument_count = 0;
   base::SmartArrayPointer<Handle<Object>> arguments =
-      GetCallerArguments(isolate, 0, &argument_count);
-  HandleArguments argument_getter(arguments.get());
-  return *NewRestArguments(isolate, callee, argument_getter, argument_count,
-                           start_index);
+      GetCallerArguments(isolate, &argument_count);
+  int num_elements = std::max(0, argument_count - start_index);
+  Handle<JSObject> result = isolate->factory()->NewJSArray(
+      FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
+      DONT_INITIALIZE_ARRAY_ELEMENTS);
+  {
+    DisallowHeapAllocation no_gc;
+    FixedArray* elements = FixedArray::cast(result->elements());
+    WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+    for (int i = 0; i < num_elements; i++) {
+      elements->set(i, *arguments[i + start_index], mode);
+    }
+  }
+  return *result;
 }
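
Runtime_NewRestParameter now derives start_index from the callee's formal
parameter count and copies only the argument tail, clamping at zero when fewer
actuals than formals were passed. The arithmetic, as a standalone sketch with
plain ints in place of tagged values:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Rest extraction: the rest array holds the caller's actuals past the
    // formal parameter count. (The real code fills a FixedArray under
    // DisallowHeapAllocation; this is just the index math.)
    std::vector<int> NewRestParameter(const std::vector<int>& args,
                                      int formal_count) {
      int num_elements =
          std::max(0, static_cast<int>(args.size()) - formal_count);
      std::vector<int> rest(num_elements);
      for (int i = 0; i < num_elements; i++) rest[i] = args[i + formal_count];
      return rest;
    }

    int main() {
      // function f(a, b, ...rest) called as f(1, 2, 3, 4, 5): rest is [3, 4, 5].
      for (int v : NewRestParameter({1, 2, 3, 4, 5}, 2)) std::printf("%d ", v);
      std::printf("\n");
      // Fewer actuals than formals: rest is empty, never negative-sized.
      std::printf("%zu\n", NewRestParameter({1}, 2).size());
    }
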
 
 
@@ -687,42 +672,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-#ifdef DEBUG
-  // This runtime function does not materialize the correct arguments when the
-  // caller has been inlined, better make sure we are not hitting that case.
-  JavaScriptFrameIterator it(isolate);
-  DCHECK(!it.frame()->HasInlinedFrames());
-#endif  // DEBUG
-  ParameterArguments argument_getter(parameters);
-  return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewRestParam) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_SMI_ARG_CHECKED(num_params, 0);
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(rest_index, 2);
-#ifdef DEBUG
-  // This runtime function does not materialize the correct arguments when the
-  // caller has been inlined, better make sure we are not hitting that case.
-  JavaScriptFrameIterator it(isolate);
-  DCHECK(!it.frame()->HasInlinedFrames());
-#endif  // DEBUG
-  Handle<JSFunction> callee;
-  ParameterArguments argument_getter(parameters);
-  return *NewRestArguments(isolate, callee, argument_getter, num_params,
-                           rest_index);
-}
-
-
 RUNTIME_FUNCTION(Runtime_NewClosure) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -957,17 +906,14 @@
 
 RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
 
   int index;
   PropertyAttributes attributes;
-  ContextLookupFlags flags = FOLLOW_CHAINS;
-  BindingFlags binding_flags;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding_flags);
+  BindingFlags flags;
+  Handle<Object> holder = isolate->context()->Lookup(
+      name, FOLLOW_CHAINS, &index, &attributes, &flags);
 
   // If the slot was not found the result is true.
   if (holder.is_null()) {
@@ -991,161 +937,158 @@
 }
 
 
-static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
-  DCHECK(!holder->IsJSGlobalObject());
+namespace {
 
-  // If the holder isn't a context extension object, we just return it
-  // as the receiver. This allows arguments objects to be used as
-  // receivers, but only if they are put in the context scope chain
-  // explicitly via a with-statement.
-  if (holder->map()->instance_type() != JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
-    return holder;
-  }
-  // Fall back to using the global object as the implicit receiver if
-  // the property turns out to be a local variable allocated in a
-  // context extension object - introduced via eval.
-  return isolate->heap()->undefined_value();
-}
-
-
-static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
-                                       bool throw_error) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-
-  if (!args[0]->IsContext() || !args[1]->IsString()) {
-    return MakePair(isolate->ThrowIllegalOperation(), NULL);
-  }
-  Handle<Context> context = args.at<Context>(0);
-  Handle<String> name = args.at<String>(1);
+MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
+                                   Object::ShouldThrow should_throw,
+                                   Handle<Object>* receiver_return = nullptr) {
+  Isolate* const isolate = name->GetIsolate();
 
   int index;
   PropertyAttributes attributes;
-  ContextLookupFlags flags = FOLLOW_CHAINS;
-  BindingFlags binding_flags;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding_flags);
-  if (isolate->has_pending_exception()) {
-    return MakePair(isolate->heap()->exception(), NULL);
-  }
+  BindingFlags flags;
+  Handle<Object> holder = isolate->context()->Lookup(
+      name, FOLLOW_CHAINS, &index, &attributes, &flags);
+  if (isolate->has_pending_exception()) return MaybeHandle<Object>();
 
   if (index != Context::kNotFound) {
     DCHECK(holder->IsContext());
     // If the "property" we were looking for is a local variable, the
     // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
     Handle<Object> receiver = isolate->factory()->undefined_value();
-    Object* value = Context::cast(*holder)->get(index);
+    Handle<Object> value = handle(Context::cast(*holder)->get(index), isolate);
     // Check for uninitialized bindings.
-    switch (binding_flags) {
+    switch (flags) {
       case MUTABLE_CHECK_INITIALIZED:
       case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
         if (value->IsTheHole()) {
-          Handle<Object> error = isolate->factory()->NewReferenceError(
-              MessageTemplate::kNotDefined, name);
-          isolate->Throw(*error);
-          return MakePair(isolate->heap()->exception(), NULL);
+          THROW_NEW_ERROR(isolate,
+                          NewReferenceError(MessageTemplate::kNotDefined, name),
+                          Object);
+        }
+      // FALLTHROUGH
+      case IMMUTABLE_CHECK_INITIALIZED:
+        if (value->IsTheHole()) {
+          DCHECK(attributes & READ_ONLY);
+          value = isolate->factory()->undefined_value();
         }
       // FALLTHROUGH
       case MUTABLE_IS_INITIALIZED:
       case IMMUTABLE_IS_INITIALIZED:
       case IMMUTABLE_IS_INITIALIZED_HARMONY:
         DCHECK(!value->IsTheHole());
-        return MakePair(value, *receiver);
-      case IMMUTABLE_CHECK_INITIALIZED:
-        if (value->IsTheHole()) {
-          DCHECK((attributes & READ_ONLY) != 0);
-          value = isolate->heap()->undefined_value();
-        }
-        return MakePair(value, *receiver);
+        if (receiver_return) *receiver_return = receiver;
+        return value;
       case MISSING_BINDING:
-        UNREACHABLE();
-        return MakePair(NULL, NULL);
+        break;
     }
+    UNREACHABLE();
   }
 
   // Otherwise, if the slot was found the holder is a context extension
   // object, subject of a with, or a global object.  We read the named
   // property from it.
   if (!holder.is_null()) {
-    Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
-    // GetProperty below can cause GC.
-    Handle<Object> receiver_handle(
-        object->IsJSGlobalObject()
-            ? Object::cast(isolate->heap()->undefined_value())
-            : object->IsJSProxy() ? static_cast<Object*>(*object)
-                                  : ComputeReceiverForNonGlobal(
-                                        isolate, JSObject::cast(*object)),
-        isolate);
-
     // No need to unhole the value here.  This is taken care of by the
     // GetProperty function.
     Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate, value, Object::GetProperty(object, name),
-        MakePair(isolate->heap()->exception(), NULL));
-    return MakePair(*value, *receiver_handle);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+                               Object::GetProperty(holder, name), Object);
+    if (receiver_return) {
+      *receiver_return =
+          (holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject())
+              ? Handle<Object>::cast(isolate->factory()->undefined_value())
+              : holder;
+    }
+    return value;
   }
 
-  if (throw_error) {
+  if (should_throw == Object::THROW_ON_ERROR) {
     // The property doesn't exist - throw exception.
-    Handle<Object> error = isolate->factory()->NewReferenceError(
-        MessageTemplate::kNotDefined, name);
-    isolate->Throw(*error);
-    return MakePair(isolate->heap()->exception(), NULL);
-  } else {
-    // The property doesn't exist - return undefined.
-    return MakePair(isolate->heap()->undefined_value(),
-                    isolate->heap()->undefined_value());
+    THROW_NEW_ERROR(
+        isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
   }
+
+  // The property doesn't exist - return undefined.
+  if (receiver_return) *receiver_return = isolate->factory()->undefined_value();
+  return isolate->factory()->undefined_value();
 }
 
-
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlot) {
-  return LoadLookupSlotHelper(args, isolate, true);
-}
+}  // namespace
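
The switch over BindingFlags above encodes the temporal-dead-zone rules: a
*_CHECK_INITIALIZED binding may still hold the hole, in which case a mutable
binding throws a ReferenceError while a read-only one silently reads undefined.
A compilable sketch of just that fall-through logic (stand-in enum and values):

    #include <cstdio>
    #include <optional>
    #include <stdexcept>

    // Hypothetical subset of V8's BindingFlags.
    enum class Binding { MutableCheckInitialized, ImmutableCheckInitialized,
                         Initialized };

    // The hole (an uninitialized binding) is modeled as nullopt.
    int LoadSlot(std::optional<int> v, Binding b) {
      switch (b) {
        case Binding::MutableCheckInitialized:
          if (!v) throw std::runtime_error("ReferenceError: not defined");
          // FALLTHROUGH
        case Binding::ImmutableCheckInitialized:
          if (!v) return 0;  // undefined stand-in; binding is READ_ONLY
          // FALLTHROUGH
        case Binding::Initialized:
          return *v;
      }
      return 0;  // unreachable
    }

    int main() {
      std::printf("%d\n", LoadSlot(7, Binding::Initialized));
      std::printf("%d\n", LoadSlot(std::nullopt,
                                   Binding::ImmutableCheckInitialized));
      try {
        LoadSlot(std::nullopt, Binding::MutableCheckInitialized);
      } catch (const std::exception& e) {
        std::printf("%s\n", e.what());
      }
    }
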
 
 
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotNoReferenceError) {
-  return LoadLookupSlotHelper(args, isolate, false);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
+RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  Handle<Object> value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR));
+  return *value;
+}
 
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
-  CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+
+RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  Handle<Object> value;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, value, LoadLookupSlot(name, Object::DONT_THROW));
+  return *value;
+}
+
+
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  DCHECK(args[0]->IsString());
+  Handle<String> name = args.at<String>(0);
+  Handle<Object> value;
+  Handle<Object> receiver;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR, &receiver),
+      MakePair(isolate->heap()->exception(), nullptr));
+  return MakePair(*value, *receiver);
+}
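
Of the three lookup entry points, only the ForCall variant keeps the RETURN_PAIR
convention, because a call site needs both the callee value and the implicit
receiver from a single lookup. A standalone sketch of the two-value return
(opaque stand-in for Object; the real ObjectPair comes back in registers or
caller memory depending on the platform):

    #include <cstdio>

    struct Object;  // opaque stand-in for v8::internal::Object

    struct ObjectPair {
      Object* value;
      Object* receiver;
    };

    ObjectPair MakePair(Object* x, Object* y) { return {x, y}; }

    // Pair-returning lookup: the found value plus the receiver to call it
    // with (undefined, modeled as nullptr, for context-local bindings).
    ObjectPair LoadLookupSlotForCall(Object* found_value) {
      return MakePair(found_value, nullptr);
    }

    int main() {
      Object* fn = reinterpret_cast<Object*>(0x42);  // fake tagged pointer
      ObjectPair p = LoadLookupSlotForCall(fn);
      std::printf("value=%p receiver=%p\n",
                  static_cast<void*>(p.value), static_cast<void*>(p.receiver));
    }
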
+
+
+namespace {
+
+MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
+                                    LanguageMode language_mode) {
+  Isolate* const isolate = name->GetIsolate();
+  Handle<Context> context(isolate->context(), isolate);
 
   int index;
   PropertyAttributes attributes;
-  ContextLookupFlags flags = FOLLOW_CHAINS;
-  BindingFlags binding_flags;
+  BindingFlags flags;
   Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding_flags);
+      context->Lookup(name, FOLLOW_CHAINS, &index, &attributes, &flags);
   if (holder.is_null()) {
     // In case of JSProxy, an exception might have been thrown.
-    if (isolate->has_pending_exception()) return isolate->heap()->exception();
+    if (isolate->has_pending_exception()) return MaybeHandle<Object>();
   }
 
   // The property was found in a context slot.
   if (index != Context::kNotFound) {
-    if ((binding_flags == MUTABLE_CHECK_INITIALIZED ||
-         binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
+    if ((flags == MUTABLE_CHECK_INITIALIZED ||
+         flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
         Handle<Context>::cast(holder)->is_the_hole(index)) {
-      THROW_NEW_ERROR_RETURN_FAILURE(
-          isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+      THROW_NEW_ERROR(isolate,
+                      NewReferenceError(MessageTemplate::kNotDefined, name),
+                      Object);
     }
     if ((attributes & READ_ONLY) == 0) {
       Handle<Context>::cast(holder)->set(index, *value);
     } else if (is_strict(language_mode)) {
       // Setting read only property in strict mode.
-      THROW_NEW_ERROR_RETURN_FAILURE(
-          isolate, NewTypeError(MessageTemplate::kStrictCannotAssign, name));
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError(MessageTemplate::kStrictCannotAssign, name),
+                      Object);
     }
-    return *value;
+    return value;
   }
 
   // Slow case: The property is not in a context slot.  It is either in a
@@ -1157,101 +1100,42 @@
     object = Handle<JSReceiver>::cast(holder);
   } else if (is_strict(language_mode)) {
     // If absent in strict mode: throw.
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+    THROW_NEW_ERROR(
+        isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
   } else {
     // If absent in sloppy mode: add the property to the global object.
     object = Handle<JSReceiver>(context->global_object());
   }
 
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, Object::SetProperty(object, name, value, language_mode));
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, value, Object::SetProperty(object, name, value, language_mode),
+      Object);
+  return value;
+}
 
+}  // namespace
+
+
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                     StoreLookupSlot(name, value, SLOPPY));
   return *value;
 }
 
 
-RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  int argument_count = 0;
-  GetCallerArguments(isolate, 0, &argument_count);
-  return Smi::FromInt(argument_count);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+                                     StoreLookupSlot(name, value, STRICT));
+  return *value;
 }
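
StoreLookupSlot is now split into per-language-mode entry points wrapping one
helper, instead of passing the mode as a runtime argument. The shape of that
split, sketched with a boolean "read only" stand-in for the failing store:

    #include <cstdio>
    #include <optional>
    #include <string>

    enum class LanguageMode { kSloppy, kStrict };

    // Shared helper: strict mode turns a silently ignored write into an
    // error (nullopt standing in for a thrown TypeError).
    std::optional<std::string> StoreLookupSlot(const std::string& value,
                                               LanguageMode mode,
                                               bool read_only) {
      if (read_only && mode == LanguageMode::kStrict) return std::nullopt;
      return value;  // sloppy mode ignores the failed write
    }

    // Two thin wrappers, mirroring Runtime_StoreLookupSlot_Sloppy/_Strict.
    std::optional<std::string> StoreSloppy(const std::string& v, bool ro) {
      return StoreLookupSlot(v, LanguageMode::kSloppy, ro);
    }
    std::optional<std::string> StoreStrict(const std::string& v, bool ro) {
      return StoreLookupSlot(v, LanguageMode::kStrict, ro);
    }

    int main() {
      std::printf("%d\n", StoreSloppy("1", true).has_value());  // 1: ignored
      std::printf("%d\n", StoreStrict("1", true).has_value());  // 0: throws
    }
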
 
-
-RUNTIME_FUNCTION(Runtime_Arguments) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
-
-  // Determine the actual arguments passed to the function.
-  int argument_count_signed = 0;
-  base::SmartArrayPointer<Handle<Object>> arguments =
-      GetCallerArguments(isolate, 0, &argument_count_signed);
-  const uint32_t argument_count = argument_count_signed;
-
-  // Try to convert the key to an index. If successful and within
-  // index return the the argument from the frame.
-  uint32_t index = 0;
-  if (raw_key->ToArrayIndex(&index) && index < argument_count) {
-    return *arguments[index];
-  }
-
-  if (raw_key->IsSymbol()) {
-    Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
-    if (Name::Equals(symbol, isolate->factory()->iterator_symbol())) {
-      return isolate->native_context()->array_values_iterator();
-    }
-    // Lookup in the initial Object.prototype object.
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
-        Object::GetProperty(isolate->initial_object_prototype(),
-                            Handle<Symbol>::cast(raw_key)));
-    return *result;
-  }
-
-  // Convert the key to a string.
-  Handle<Object> converted;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, converted,
-                                     Object::ToString(isolate, raw_key));
-  Handle<String> key = Handle<String>::cast(converted);
-
-  // Try to convert the string key into an array index.
-  if (key->AsArrayIndex(&index)) {
-    if (index < argument_count) {
-      return *arguments[index];
-    } else {
-      Handle<Object> initial_prototype(isolate->initial_object_prototype());
-      Handle<Object> result;
-      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-          isolate, result,
-          Object::GetElement(isolate, initial_prototype, index));
-      return *result;
-    }
-  }
-
-  // Handle special arguments properties.
-  if (String::Equals(isolate->factory()->length_string(), key)) {
-    return Smi::FromInt(argument_count);
-  }
-  if (String::Equals(isolate->factory()->callee_string(), key)) {
-    JavaScriptFrameIterator it(isolate);
-    JSFunction* function = it.frame()->function();
-    if (is_strict(function->shared()->language_mode())) {
-      THROW_NEW_ERROR_RETURN_FAILURE(
-          isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
-    }
-    return function;
-  }
-
-  // Lookup in the initial Object.prototype object.
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Object::GetProperty(isolate->initial_object_prototype(), key));
-  return *result;
-}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-simd.cc b/src/runtime/runtime-simd.cc
index 59e4fa1..9e56142 100644
--- a/src/runtime/runtime-simd.cc
+++ b/src/runtime/runtime-simd.cc
@@ -164,46 +164,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_SimdSameValue) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
-  bool result = false;
-  // args[1] is of unknown type.
-  if (args[1]->IsSimd128Value()) {
-    Simd128Value* b = Simd128Value::cast(args[1]);
-    if (a->map() == b->map()) {
-      if (a->IsFloat32x4()) {
-        result = Float32x4::cast(*a)->SameValue(Float32x4::cast(b));
-      } else {
-        result = a->BitwiseEquals(b);
-      }
-    }
-  }
-  return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
-  bool result = false;
-  // args[1] is of unknown type.
-  if (args[1]->IsSimd128Value()) {
-    Simd128Value* b = Simd128Value::cast(args[1]);
-    if (a->map() == b->map()) {
-      if (a->IsFloat32x4()) {
-        result = Float32x4::cast(*a)->SameValueZero(Float32x4::cast(b));
-      } else {
-        result = a->BitwiseEquals(b);
-      }
-    }
-  }
-  return isolate->heap()->ToBoolean(result);
-}
-
-
 //-------------------------------------------------------------------
 
 // Utility macros.
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index bd4dd69..fcec47d 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -341,7 +341,7 @@
 
   RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
 
-  RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+  RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
   if (global_cache.HasException()) return isolate->heap()->exception();
 
   int capture_count = regexp->CaptureCount();
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index 3b92d7f..5f27a60 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -377,8 +377,7 @@
 
 RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
   DCHECK(args.length() == 0);
-  return Smi::FromInt(Natives::GetBuiltinsCount() +
-                      ExtraNatives::GetBuiltinsCount());
+  return Smi::FromInt(Natives::GetBuiltinsCount());
 }
 
 
@@ -409,53 +408,54 @@
   return isolate->heap()->undefined_value();
 }
 
+namespace {
 
-static int StackSize(Isolate* isolate) {
+int StackSize(Isolate* isolate) {
   int n = 0;
   for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
   return n;
 }
 
-
-static void PrintTransition(Isolate* isolate, Object* result) {
-  // indentation
-  {
-    const int nmax = 80;
-    int n = StackSize(isolate);
-    if (n <= nmax)
-      PrintF("%4d:%*s", n, n, "");
-    else
-      PrintF("%4d:%*s", n, nmax, "...");
-  }
-
-  if (result == NULL) {
-    JavaScriptFrame::PrintTop(isolate, stdout, true, false);
-    PrintF(" {\n");
+void PrintIndentation(Isolate* isolate) {
+  const int nmax = 80;
+  int n = StackSize(isolate);
+  if (n <= nmax) {
+    PrintF("%4d:%*s", n, n, "");
   } else {
-    // function result
-    PrintF("} -> ");
-    result->ShortPrint();
-    PrintF("\n");
+    PrintF("%4d:%*s", n, nmax, "...");
   }
 }
 
+}  // namespace
 
 RUNTIME_FUNCTION(Runtime_TraceEnter) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
-  PrintTransition(isolate, NULL);
+  DCHECK_EQ(0, args.length());
+  PrintIndentation(isolate);
+  JavaScriptFrame::PrintTop(isolate, stdout, true, false);
+  PrintF(" {\n");
   return isolate->heap()->undefined_value();
 }
 
 
 RUNTIME_FUNCTION(Runtime_TraceExit) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
-  PrintTransition(isolate, obj);
+  PrintIndentation(isolate);
+  PrintF("} -> ");
+  obj->ShortPrint();
+  PrintF("\n");
   return obj;  // return TOS
 }
 
+RUNTIME_FUNCTION(Runtime_TraceTailCall) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(0, args.length());
+  PrintIndentation(isolate);
+  PrintF("} -> tail call ->\n");
+  return isolate->heap()->undefined_value();
+}
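
The trace refactoring splits the old PrintTransition into PrintIndentation plus
three callers, so the new tail-call tracer can reuse the indentation logic. The
helper itself is self-contained enough to run as-is (stack depth passed in
directly here, instead of walking JavaScript frames):

    #include <cstdio>

    // Indentation as in PrintIndentation above: frame count, then that many
    // spaces, capped at 80 columns with a "..." marker past the cap.
    void PrintIndentation(int stack_depth) {
      const int nmax = 80;
      if (stack_depth <= nmax) {
        std::printf("%4d:%*s", stack_depth, stack_depth, "");
      } else {
        std::printf("%4d:%*s", stack_depth, nmax, "...");
      }
    }

    int main() {
      PrintIndentation(3); std::printf("f() {\n");              // TraceEnter
      PrintIndentation(3); std::printf("} -> 42\n");            // TraceExit
      PrintIndentation(3); std::printf("} -> tail call ->\n");  // TraceTailCall
    }
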
 
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index a82b71d..bf0ee9f 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -414,42 +414,6 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
-  CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
-  CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
-
-  DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
-            holder->GetInternalFieldCount());
-  for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
-    holder->SetInternalField(i, Smi::FromInt(0));
-  }
-  size_t buffer_length = 0;
-  size_t offset = 0;
-  size_t length = 0;
-  RUNTIME_ASSERT(
-      TryNumberToSize(isolate, buffer->byte_length(), &buffer_length));
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset));
-  RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length));
-
-  // TODO(jkummerow): When we have a "safe numerics" helper class, use it here.
-  // Entire range [offset, offset + length] must be in bounds.
-  RUNTIME_ASSERT(offset <= buffer_length);
-  RUNTIME_ASSERT(offset + length <= buffer_length);
-  // No overflow.
-  RUNTIME_ASSERT(offset + length >= offset);
-
-  holder->set_buffer(*buffer);
-  holder->set_byte_offset(*byte_offset);
-  holder->set_byte_length(*byte_length);
-
-  return isolate->heap()->undefined_value();
-}
-
-
 inline static bool NeedToFlipBytes(bool is_little_endian) {
 #ifdef V8_TARGET_LITTLE_ENDIAN
   return !is_little_endian;
diff --git a/src/runtime/runtime-utils.h b/src/runtime/runtime-utils.h
index ded2c09..c673b5a 100644
--- a/src/runtime/runtime-utils.h
+++ b/src/runtime/runtime-utils.h
@@ -162,6 +162,22 @@
 }
 #endif
 
+
+// A mechanism to return a triple of Object pointers. In all calling
+// conventions, a struct of three pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
+struct ObjectTriple {
+  Object* x;
+  Object* y;
+  Object* z;
+};
+
+static inline ObjectTriple MakeTriple(Object* x, Object* y, Object* z) {
+  ObjectTriple result = {x, y, z};
+  // ObjectTriple is assigned to a hidden first argument.
+  return result;
+}
+
 }  // namespace internal
 }  // namespace v8
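
Because ObjectTriple is returned by value, common C++ ABIs hand it back in
caller-allocated memory through a hidden first parameter (sret), which is what
the triple-returning intrinsic declarations in runtime.cc below rely on. A
standalone illustration with an opaque stand-in Object:

    #include <cstdio>

    struct Object;  // opaque stand-in for v8::internal::Object

    struct ObjectTriple {
      Object* x;
      Object* y;
      Object* z;
    };

    // A 3-pointer struct is too big for register return on most targets, so
    // the caller allocates the result and passes its address implicitly.
    ObjectTriple MakeTriple(Object* x, Object* y, Object* z) {
      ObjectTriple result = {x, y, z};
      return result;  // copied into the caller-provided slot
    }

    int main() {
      Object* a = reinterpret_cast<Object*>(0x10);
      Object* b = reinterpret_cast<Object*>(0x20);
      Object* c = reinterpret_cast<Object*>(0x30);
      ObjectTriple t = MakeTriple(a, b, c);
      std::printf("%p %p %p\n", static_cast<void*>(t.x),
                  static_cast<void*>(t.y), static_cast<void*>(t.z));
    }
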
 
diff --git a/src/runtime/runtime.cc b/src/runtime/runtime.cc
index 90f4e4c..151e240 100644
--- a/src/runtime/runtime.cc
+++ b/src/runtime/runtime.cc
@@ -27,6 +27,12 @@
 FOR_EACH_INTRINSIC_RETURN_PAIR(P)
 #undef P
 
+#define T(name, number_of_args, result_size)                         \
+  ObjectTriple Runtime_##name(int args_length, Object** args_object, \
+                              Isolate* isolate);
+FOR_EACH_INTRINSIC_RETURN_TRIPLE(T)
+#undef T
+
 
 #define F(name, number_of_args, result_size)                                  \
   {                                                                           \
@@ -124,5 +130,6 @@
   return os << Runtime::FunctionForId(id)->name;
 }
 
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 283087a..7019c3b 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -6,6 +6,7 @@
 #define V8_RUNTIME_RUNTIME_H_
 
 #include "src/allocation.h"
+#include "src/base/platform/time.h"
 #include "src/objects.h"
 #include "src/unicode.h"
 #include "src/zone.h"
@@ -50,7 +51,6 @@
   F(GetCachedArrayIndex, 1, 1)       \
   F(FixedArrayGet, 2, 1)             \
   F(FixedArraySet, 3, 1)             \
-  F(FastOneByteArrayJoin, 2, 1)      \
   F(ArraySpeciesConstructor, 1, 1)
 
 
@@ -73,7 +73,6 @@
   F(AtomicsFutexWakeOrRequeue, 5, 1) \
   F(AtomicsFutexNumWaitersForTesting, 2, 1)
 
-
 #define FOR_EACH_INTRINSIC_CLASSES(F)       \
   F(ThrowNonMethodError, 0, 1)              \
   F(ThrowUnsupportedSuperError, 0, 1)       \
@@ -82,18 +81,16 @@
   F(ThrowStaticPrototypeError, 0, 1)        \
   F(ThrowIfStaticPrototype, 1, 1)           \
   F(HomeObjectSymbol, 0, 1)                 \
-  F(DefineClass, 5, 1)                      \
+  F(DefineClass, 4, 1)                      \
   F(FinalizeClassDefinition, 2, 1)          \
-  F(DefineClassMethod, 3, 1)                \
-  F(LoadFromSuper, 4, 1)                    \
-  F(LoadKeyedFromSuper, 4, 1)               \
+  F(LoadFromSuper, 3, 1)                    \
+  F(LoadKeyedFromSuper, 3, 1)               \
   F(StoreToSuper_Strict, 4, 1)              \
   F(StoreToSuper_Sloppy, 4, 1)              \
   F(StoreKeyedToSuper_Strict, 4, 1)         \
   F(StoreKeyedToSuper_Sloppy, 4, 1)         \
   F(GetSuperConstructor, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
   F(StringGetRawHashField, 1, 1)          \
   F(TheHole, 0, 1)                        \
@@ -203,26 +200,20 @@
 
 #define FOR_EACH_INTRINSIC_FORIN(F) \
   F(ForInDone, 2, 1)                \
+  F(ForInEnumerate, 1, 1)           \
   F(ForInFilter, 2, 1)              \
   F(ForInNext, 4, 1)                \
   F(ForInStep, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_INTERPRETER(F) \
-  F(InterpreterEquals, 2, 1)              \
-  F(InterpreterNotEquals, 2, 1)           \
-  F(InterpreterStrictEquals, 2, 1)        \
-  F(InterpreterStrictNotEquals, 2, 1)     \
-  F(InterpreterLessThan, 2, 1)            \
-  F(InterpreterGreaterThan, 2, 1)         \
-  F(InterpreterLessThanOrEqual, 2, 1)     \
-  F(InterpreterGreaterThanOrEqual, 2, 1)  \
   F(InterpreterToBoolean, 1, 1)           \
   F(InterpreterLogicalNot, 1, 1)          \
   F(InterpreterTypeOf, 1, 1)              \
   F(InterpreterNewClosure, 2, 1)          \
-  F(InterpreterForInPrepare, 1, 1)
-
+  F(InterpreterTraceBytecodeEntry, 3, 1)  \
+  F(InterpreterTraceBytecodeExit, 3, 1)   \
+  F(InterpreterClearPendingMessage, 0, 1) \
+  F(InterpreterSetPendingMessage, 1, 1)
 
 #define FOR_EACH_INTRINSIC_FUNCTION(F)     \
   F(FunctionGetName, 1, 1)                 \
@@ -248,18 +239,19 @@
   F(IsFunction, 1, 1)                      \
   F(FunctionToString, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_GENERATOR(F) \
   F(CreateJSGeneratorObject, 0, 1)      \
-  F(SuspendJSGeneratorObject, -1, 1)    \
+  F(SuspendJSGeneratorObject, 1, 1)     \
   F(ResumeJSGeneratorObject, 3, 1)      \
   F(GeneratorClose, 1, 1)               \
   F(GeneratorGetFunction, 1, 1)         \
   F(GeneratorGetContext, 1, 1)          \
   F(GeneratorGetReceiver, 1, 1)         \
+  F(GeneratorGetInput, 1, 1)            \
   F(GeneratorGetContinuation, 1, 1)     \
   F(GeneratorGetSourcePosition, 1, 1)   \
   F(GeneratorNext, 2, 1)                \
+  F(GeneratorReturn, 2, 1)              \
   F(GeneratorThrow, 2, 1)
 
 
@@ -293,50 +285,52 @@
 #endif
 
 
-#define FOR_EACH_INTRINSIC_INTERNAL(F)        \
-  F(CheckIsBootstrapping, 0, 1)               \
-  F(ExportFromRuntime, 1, 1)                  \
-  F(ExportExperimentalFromRuntime, 1, 1)      \
-  F(InstallToContext, 1, 1)                   \
-  F(Throw, 1, 1)                              \
-  F(ReThrow, 1, 1)                            \
-  F(UnwindAndFindExceptionHandler, 0, 1)      \
-  F(PromoteScheduledException, 0, 1)          \
-  F(ThrowReferenceError, 1, 1)                \
-  F(ThrowApplyNonFunction, 1, 1)              \
-  F(NewTypeError, 2, 1)                       \
-  F(NewSyntaxError, 2, 1)                     \
-  F(NewReferenceError, 2, 1)                  \
-  F(ThrowIllegalInvocation, 0, 1)             \
-  F(ThrowIteratorResultNotAnObject, 1, 1)     \
-  F(ThrowStackOverflow, 0, 1)                 \
-  F(ThrowStrongModeImplicitConversion, 0, 1)  \
-  F(PromiseRejectEvent, 3, 1)                 \
-  F(PromiseRevokeReject, 1, 1)                \
-  F(StackGuard, 0, 1)                         \
-  F(Interrupt, 0, 1)                          \
-  F(AllocateInNewSpace, 1, 1)                 \
-  F(AllocateInTargetSpace, 2, 1)              \
-  F(CollectStackTrace, 2, 1)                  \
-  F(MessageGetStartPosition, 1, 1)            \
-  F(MessageGetScript, 1, 1)                   \
-  F(FormatMessageString, 4, 1)                \
-  F(CallSiteGetFileNameRT, 1, 1)              \
-  F(CallSiteGetFunctionNameRT, 1, 1)          \
-  F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1) \
-  F(CallSiteGetMethodNameRT, 1, 1)            \
-  F(CallSiteGetLineNumberRT, 1, 1)            \
-  F(CallSiteGetColumnNumberRT, 1, 1)          \
-  F(CallSiteIsNativeRT, 1, 1)                 \
-  F(CallSiteIsToplevelRT, 1, 1)               \
-  F(CallSiteIsEvalRT, 1, 1)                   \
-  F(CallSiteIsConstructorRT, 1, 1)            \
-  F(IS_VAR, 1, 1)                             \
-  F(IncrementStatsCounter, 1, 1)              \
-  F(ThrowConstructedNonConstructable, 1, 1)   \
-  F(ThrowCalledNonCallable, 1, 1)             \
-  F(CreateListFromArrayLike, 1, 1)            \
-  F(IncrementUseCounter, 1, 1)
+#define FOR_EACH_INTRINSIC_INTERNAL(F)              \
+  F(CheckIsBootstrapping, 0, 1)                     \
+  F(ExportFromRuntime, 1, 1)                        \
+  F(ExportExperimentalFromRuntime, 1, 1)            \
+  F(InstallToContext, 1, 1)                         \
+  F(Throw, 1, 1)                                    \
+  F(ReThrow, 1, 1)                                  \
+  F(UnwindAndFindExceptionHandler, 0, 1)            \
+  F(PromoteScheduledException, 0, 1)                \
+  F(ThrowReferenceError, 1, 1)                      \
+  F(ThrowApplyNonFunction, 1, 1)                    \
+  F(NewTypeError, 2, 1)                             \
+  F(NewSyntaxError, 2, 1)                           \
+  F(NewReferenceError, 2, 1)                        \
+  F(ThrowIllegalInvocation, 0, 1)                   \
+  F(ThrowIteratorResultNotAnObject, 1, 1)           \
+  F(ThrowStackOverflow, 0, 1)                       \
+  F(ThrowStrongModeImplicitConversion, 0, 1)        \
+  F(PromiseRejectEvent, 3, 1)                       \
+  F(PromiseRevokeReject, 1, 1)                      \
+  F(StackGuard, 0, 1)                               \
+  F(Interrupt, 0, 1)                                \
+  F(AllocateInNewSpace, 1, 1)                       \
+  F(AllocateInTargetSpace, 2, 1)                    \
+  F(CollectStackTrace, 2, 1)                        \
+  F(MessageGetStartPosition, 1, 1)                  \
+  F(MessageGetScript, 1, 1)                         \
+  F(FormatMessageString, 4, 1)                      \
+  F(CallSiteGetFileNameRT, 1, 1)                    \
+  F(CallSiteGetFunctionNameRT, 1, 1)                \
+  F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1)       \
+  F(CallSiteGetMethodNameRT, 1, 1)                  \
+  F(CallSiteGetLineNumberRT, 1, 1)                  \
+  F(CallSiteGetColumnNumberRT, 1, 1)                \
+  F(CallSiteIsNativeRT, 1, 1)                       \
+  F(CallSiteIsToplevelRT, 1, 1)                     \
+  F(CallSiteIsEvalRT, 1, 1)                         \
+  F(CallSiteIsConstructorRT, 1, 1)                  \
+  F(IS_VAR, 1, 1)                                   \
+  F(IncrementStatsCounter, 1, 1)                    \
+  F(ThrowConstructedNonConstructable, 1, 1)         \
+  F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
+  F(ThrowCalledNonCallable, 1, 1)                   \
+  F(CreateListFromArrayLike, 1, 1)                  \
+  F(IncrementUseCounter, 1, 1)                      \
+  F(GetAndResetRuntimeCallStats, 0, 1)
 
 
 #define FOR_EACH_INTRINSIC_JSON(F) \
@@ -345,12 +339,11 @@
   F(ParseJson, 1, 1)
 
 
-#define FOR_EACH_INTRINSIC_LITERALS(F)   \
-  F(CreateRegExpLiteral, 4, 1)           \
-  F(CreateObjectLiteral, 4, 1)           \
-  F(CreateArrayLiteral, 4, 1)            \
-  F(CreateArrayLiteralStubBailout, 3, 1) \
-  F(StoreArrayLiteralElement, 5, 1)
+#define FOR_EACH_INTRINSIC_LITERALS(F) \
+  F(CreateRegExpLiteral, 4, 1)         \
+  F(CreateObjectLiteral, 4, 1)         \
+  F(CreateArrayLiteral, 4, 1)          \
+  F(CreateArrayLiteralStubBailout, 3, 1)
 
 
 #define FOR_EACH_INTRINSIC_LIVEEDIT(F)              \
@@ -385,7 +378,6 @@
   F(RoundNumber, 1, 1)              \
   F(MathSqrt, 1, 1)                 \
   F(MathFround, 1, 1)               \
-  F(IsMinusZero, 1, 1)              \
   F(GenerateRandomNumbers, 1, 1)
 
 
@@ -410,18 +402,14 @@
   F(GetHoleNaNUpper, 0, 1)             \
   F(GetHoleNaNLower, 0, 1)
 
-
 #define FOR_EACH_INTRINSIC_OBJECT(F)                 \
   F(GetPrototype, 1, 1)                              \
   F(InternalSetPrototype, 2, 1)                      \
   F(SetPrototype, 2, 1)                              \
-  F(GetOwnProperty, 2, 1)                            \
   F(GetOwnProperty_Legacy, 2, 1)                     \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   F(GetProperty, 2, 1)                               \
-  F(GetPropertyStrong, 2, 1)                         \
   F(KeyedGetProperty, 2, 1)                          \
-  F(KeyedGetPropertyStrong, 2, 1)                    \
   F(LoadGlobalViaContext, 1, 1)                      \
   F(StoreGlobalViaContext_Sloppy, 2, 1)              \
   F(StoreGlobalViaContext_Strict, 2, 1)              \
@@ -434,7 +422,6 @@
   F(HasOwnProperty, 2, 1)                            \
   F(HasProperty, 2, 1)                               \
   F(PropertyIsEnumerable, 2, 1)                      \
-  F(GetPropertyNamesFast, 1, 1)                      \
   F(GetOwnPropertyKeys, 2, 1)                        \
   F(GetInterceptorInfo, 1, 1)                        \
   F(ToFastProperties, 1, 1)                          \
@@ -448,12 +435,10 @@
   F(IsJSGlobalProxy, 1, 1)                           \
   F(DefineAccessorPropertyUnchecked, 5, 1)           \
   F(DefineDataPropertyUnchecked, 4, 1)               \
+  F(DefineDataPropertyInLiteral, 5, 1)               \
   F(GetDataProperty, 2, 1)                           \
   F(HasFastPackedElements, 1, 1)                     \
   F(ValueOf, 1, 1)                                   \
-  F(SetValueOf, 2, 1)                                \
-  F(JSValueGetValue, 1, 1)                           \
-  F(ObjectEquals, 2, 1)                              \
   F(IsJSReceiver, 1, 1)                              \
   F(IsStrong, 1, 1)                                  \
   F(ClassOf, 1, 1)                                   \
@@ -468,10 +453,9 @@
   F(ToLength, 1, 1)                                  \
   F(ToString, 1, 1)                                  \
   F(ToName, 1, 1)                                    \
-  F(Equals, 2, 1)                                    \
-  F(StrictEquals, 2, 1)                              \
+  F(SameValue, 2, 1)                                 \
+  F(SameValueZero, 2, 1)                             \
   F(Compare, 3, 1)                                   \
-  F(Compare_Strong, 3, 1)                            \
   F(InstanceOf, 2, 1)                                \
   F(HasInPrototypeChain, 2, 1)                       \
   F(CreateIterResultObject, 2, 1)                    \
@@ -479,7 +463,6 @@
   F(ObjectDefineProperties, 2, 1)                    \
   F(ObjectDefineProperty, 3, 1)
 
-
 #define FOR_EACH_INTRINSIC_OBSERVE(F)            \
   F(IsObserved, 1, 1)                            \
   F(SetIsObserved, 1, 1)                         \
@@ -493,30 +476,26 @@
   F(GetObjectContextObjectGetNotifier, 1, 1)     \
   F(GetObjectContextNotifierPerformChange, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_OPERATORS(F) \
   F(Multiply, 2, 1)                     \
-  F(Multiply_Strong, 2, 1)              \
   F(Divide, 2, 1)                       \
-  F(Divide_Strong, 2, 1)                \
   F(Modulus, 2, 1)                      \
-  F(Modulus_Strong, 2, 1)               \
   F(Add, 2, 1)                          \
-  F(Add_Strong, 2, 1)                   \
   F(Subtract, 2, 1)                     \
-  F(Subtract_Strong, 2, 1)              \
   F(ShiftLeft, 2, 1)                    \
-  F(ShiftLeft_Strong, 2, 1)             \
   F(ShiftRight, 2, 1)                   \
-  F(ShiftRight_Strong, 2, 1)            \
   F(ShiftRightLogical, 2, 1)            \
-  F(ShiftRightLogical_Strong, 2, 1)     \
   F(BitwiseAnd, 2, 1)                   \
-  F(BitwiseAnd_Strong, 2, 1)            \
   F(BitwiseOr, 2, 1)                    \
-  F(BitwiseOr_Strong, 2, 1)             \
   F(BitwiseXor, 2, 1)                   \
-  F(BitwiseXor_Strong, 2, 1)
+  F(Equal, 2, 1)                        \
+  F(NotEqual, 2, 1)                     \
+  F(StrictEqual, 2, 1)                  \
+  F(StrictNotEqual, 2, 1)               \
+  F(LessThan, 2, 1)                     \
+  F(GreaterThan, 2, 1)                  \
+  F(LessThanOrEqual, 2, 1)              \
+  F(GreaterThanOrEqual, 2, 1)
 
 #define FOR_EACH_INTRINSIC_PROXY(F)     \
   F(IsJSProxy, 1, 1)                    \
@@ -538,7 +517,6 @@
   F(RegExpExecReThrow, 4, 1)                   \
   F(IsRegExp, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_SCOPES(F)       \
   F(ThrowConstAssignError, 0, 1)           \
   F(DeclareGlobals, 2, 1)                  \
@@ -547,11 +525,9 @@
   F(DeclareLookupSlot, 3, 1)               \
   F(InitializeLegacyConstLookupSlot, 3, 1) \
   F(NewSloppyArguments_Generic, 1, 1)      \
-  F(NewStrictArguments_Generic, 1, 1)      \
-  F(NewRestArguments_Generic, 2, 1)        \
+  F(NewStrictArguments, 1, 1)              \
+  F(NewRestParameter, 1, 1)                \
   F(NewSloppyArguments, 3, 1)              \
-  F(NewStrictArguments, 3, 1)              \
-  F(NewRestParam, 3, 1)                    \
   F(NewClosure, 1, 1)                      \
   F(NewClosure_Tenured, 1, 1)              \
   F(NewScriptContext, 2, 1)                \
@@ -562,317 +538,321 @@
   F(IsJSModule, 1, 1)                      \
   F(PushModuleContext, 2, 1)               \
   F(DeclareModules, 1, 1)                  \
-  F(DeleteLookupSlot, 2, 1)                \
-  F(StoreLookupSlot, 4, 1)                 \
-  F(ArgumentsLength, 0, 1)                 \
-  F(Arguments, 1, 1)
+  F(DeleteLookupSlot, 1, 1)                \
+  F(LoadLookupSlot, 1, 1)                  \
+  F(LoadLookupSlotInsideTypeof, 1, 1)      \
+  F(StoreLookupSlot_Sloppy, 2, 1)          \
+  F(StoreLookupSlot_Strict, 2, 1)
 
-
-#define FOR_EACH_INTRINSIC_SIMD(F)           \
-  F(IsSimdValue, 1, 1)                       \
-  F(SimdSameValue, 2, 1)                     \
-  F(SimdSameValueZero, 2, 1)                 \
-  F(CreateFloat32x4, 4, 1)                   \
-  F(CreateInt32x4, 4, 1)                     \
-  F(CreateUint32x4, 4, 1)                    \
-  F(CreateBool32x4, 4, 1)                    \
-  F(CreateInt16x8, 8, 1)                     \
-  F(CreateUint16x8, 8, 1)                    \
-  F(CreateBool16x8, 8, 1)                    \
-  F(CreateInt8x16, 16, 1)                    \
-  F(CreateUint8x16, 16, 1)                   \
-  F(CreateBool8x16, 16, 1)                   \
-  F(Float32x4Check, 1, 1)                    \
-  F(Float32x4ExtractLane, 2, 1)              \
-  F(Float32x4ReplaceLane, 3, 1)              \
-  F(Float32x4Abs, 1, 1)                      \
-  F(Float32x4Neg, 1, 1)                      \
-  F(Float32x4Sqrt, 1, 1)                     \
-  F(Float32x4RecipApprox, 1, 1)              \
-  F(Float32x4RecipSqrtApprox, 1, 1)          \
-  F(Float32x4Add, 2, 1)                      \
-  F(Float32x4Sub, 2, 1)                      \
-  F(Float32x4Mul, 2, 1)                      \
-  F(Float32x4Div, 2, 1)                      \
-  F(Float32x4Min, 2, 1)                      \
-  F(Float32x4Max, 2, 1)                      \
-  F(Float32x4MinNum, 2, 1)                   \
-  F(Float32x4MaxNum, 2, 1)                   \
-  F(Float32x4Equal, 2, 1)                    \
-  F(Float32x4NotEqual, 2, 1)                 \
-  F(Float32x4LessThan, 2, 1)                 \
-  F(Float32x4LessThanOrEqual, 2, 1)          \
-  F(Float32x4GreaterThan, 2, 1)              \
-  F(Float32x4GreaterThanOrEqual, 2, 1)       \
-  F(Float32x4Select, 3, 1)                   \
-  F(Float32x4Swizzle, 5, 1)                  \
-  F(Float32x4Shuffle, 6, 1)                  \
-  F(Float32x4FromInt32x4, 1, 1)              \
-  F(Float32x4FromUint32x4, 1, 1)             \
-  F(Float32x4FromInt32x4Bits, 1, 1)          \
-  F(Float32x4FromUint32x4Bits, 1, 1)         \
-  F(Float32x4FromInt16x8Bits, 1, 1)          \
-  F(Float32x4FromUint16x8Bits, 1, 1)         \
-  F(Float32x4FromInt8x16Bits, 1, 1)          \
-  F(Float32x4FromUint8x16Bits, 1, 1)         \
-  F(Float32x4Load, 2, 1)                     \
-  F(Float32x4Load1, 2, 1)                    \
-  F(Float32x4Load2, 2, 1)                    \
-  F(Float32x4Load3, 2, 1)                    \
-  F(Float32x4Store, 3, 1)                    \
-  F(Float32x4Store1, 3, 1)                   \
-  F(Float32x4Store2, 3, 1)                   \
-  F(Float32x4Store3, 3, 1)                   \
-  F(Int32x4Check, 1, 1)                      \
-  F(Int32x4ExtractLane, 2, 1)                \
-  F(Int32x4ReplaceLane, 3, 1)                \
-  F(Int32x4Neg, 1, 1)                        \
-  F(Int32x4Add, 2, 1)                        \
-  F(Int32x4Sub, 2, 1)                        \
-  F(Int32x4Mul, 2, 1)                        \
-  F(Int32x4Min, 2, 1)                        \
-  F(Int32x4Max, 2, 1)                        \
-  F(Int32x4And, 2, 1)                        \
-  F(Int32x4Or, 2, 1)                         \
-  F(Int32x4Xor, 2, 1)                        \
-  F(Int32x4Not, 1, 1)                        \
-  F(Int32x4ShiftLeftByScalar, 2, 1)          \
-  F(Int32x4ShiftRightByScalar, 2, 1)         \
-  F(Int32x4Equal, 2, 1)                      \
-  F(Int32x4NotEqual, 2, 1)                   \
-  F(Int32x4LessThan, 2, 1)                   \
-  F(Int32x4LessThanOrEqual, 2, 1)            \
-  F(Int32x4GreaterThan, 2, 1)                \
-  F(Int32x4GreaterThanOrEqual, 2, 1)         \
-  F(Int32x4Select, 3, 1)                     \
-  F(Int32x4Swizzle, 5, 1)                    \
-  F(Int32x4Shuffle, 6, 1)                    \
-  F(Int32x4FromFloat32x4, 1, 1)              \
-  F(Int32x4FromUint32x4, 1, 1)               \
-  F(Int32x4FromFloat32x4Bits, 1, 1)          \
-  F(Int32x4FromUint32x4Bits, 1, 1)           \
-  F(Int32x4FromInt16x8Bits, 1, 1)            \
-  F(Int32x4FromUint16x8Bits, 1, 1)           \
-  F(Int32x4FromInt8x16Bits, 1, 1)            \
-  F(Int32x4FromUint8x16Bits, 1, 1)           \
-  F(Int32x4Load, 2, 1)                       \
-  F(Int32x4Load1, 2, 1)                      \
-  F(Int32x4Load2, 2, 1)                      \
-  F(Int32x4Load3, 2, 1)                      \
-  F(Int32x4Store, 3, 1)                      \
-  F(Int32x4Store1, 3, 1)                     \
-  F(Int32x4Store2, 3, 1)                     \
-  F(Int32x4Store3, 3, 1)                     \
-  F(Uint32x4Check, 1, 1)                     \
-  F(Uint32x4ExtractLane, 2, 1)               \
-  F(Uint32x4ReplaceLane, 3, 1)               \
-  F(Uint32x4Add, 2, 1)                       \
-  F(Uint32x4Sub, 2, 1)                       \
-  F(Uint32x4Mul, 2, 1)                       \
-  F(Uint32x4Min, 2, 1)                       \
-  F(Uint32x4Max, 2, 1)                       \
-  F(Uint32x4And, 2, 1)                       \
-  F(Uint32x4Or, 2, 1)                        \
-  F(Uint32x4Xor, 2, 1)                       \
-  F(Uint32x4Not, 1, 1)                       \
-  F(Uint32x4ShiftLeftByScalar, 2, 1)         \
-  F(Uint32x4ShiftRightByScalar, 2, 1)        \
-  F(Uint32x4Equal, 2, 1)                     \
-  F(Uint32x4NotEqual, 2, 1)                  \
-  F(Uint32x4LessThan, 2, 1)                  \
-  F(Uint32x4LessThanOrEqual, 2, 1)           \
-  F(Uint32x4GreaterThan, 2, 1)               \
-  F(Uint32x4GreaterThanOrEqual, 2, 1)        \
-  F(Uint32x4Select, 3, 1)                    \
-  F(Uint32x4Swizzle, 5, 1)                   \
-  F(Uint32x4Shuffle, 6, 1)                   \
-  F(Uint32x4FromFloat32x4, 1, 1)             \
-  F(Uint32x4FromInt32x4, 1, 1)               \
-  F(Uint32x4FromFloat32x4Bits, 1, 1)         \
-  F(Uint32x4FromInt32x4Bits, 1, 1)           \
-  F(Uint32x4FromInt16x8Bits, 1, 1)           \
-  F(Uint32x4FromUint16x8Bits, 1, 1)          \
-  F(Uint32x4FromInt8x16Bits, 1, 1)           \
-  F(Uint32x4FromUint8x16Bits, 1, 1)          \
-  F(Uint32x4Load, 2, 1)                      \
-  F(Uint32x4Load1, 2, 1)                     \
-  F(Uint32x4Load2, 2, 1)                     \
-  F(Uint32x4Load3, 2, 1)                     \
-  F(Uint32x4Store, 3, 1)                     \
-  F(Uint32x4Store1, 3, 1)                    \
-  F(Uint32x4Store2, 3, 1)                    \
-  F(Uint32x4Store3, 3, 1)                    \
-  F(Bool32x4Check, 1, 1)                     \
-  F(Bool32x4ExtractLane, 2, 1)               \
-  F(Bool32x4ReplaceLane, 3, 1)               \
-  F(Bool32x4And, 2, 1)                       \
-  F(Bool32x4Or, 2, 1)                        \
-  F(Bool32x4Xor, 2, 1)                       \
-  F(Bool32x4Not, 1, 1)                       \
-  F(Bool32x4AnyTrue, 1, 1)                   \
-  F(Bool32x4AllTrue, 1, 1)                   \
-  F(Bool32x4Swizzle, 5, 1)                   \
-  F(Bool32x4Shuffle, 6, 1)                   \
-  F(Int16x8Check, 1, 1)                      \
-  F(Int16x8ExtractLane, 2, 1)                \
-  F(Int16x8ReplaceLane, 3, 1)                \
-  F(Int16x8Neg, 1, 1)                        \
-  F(Int16x8Add, 2, 1)                        \
-  F(Int16x8AddSaturate, 2, 1)                \
-  F(Int16x8Sub, 2, 1)                        \
-  F(Int16x8SubSaturate, 2, 1)                \
-  F(Int16x8Mul, 2, 1)                        \
-  F(Int16x8Min, 2, 1)                        \
-  F(Int16x8Max, 2, 1)                        \
-  F(Int16x8And, 2, 1)                        \
-  F(Int16x8Or, 2, 1)                         \
-  F(Int16x8Xor, 2, 1)                        \
-  F(Int16x8Not, 1, 1)                        \
-  F(Int16x8ShiftLeftByScalar, 2, 1)          \
-  F(Int16x8ShiftRightByScalar, 2, 1)         \
-  F(Int16x8Equal, 2, 1)                      \
-  F(Int16x8NotEqual, 2, 1)                   \
-  F(Int16x8LessThan, 2, 1)                   \
-  F(Int16x8LessThanOrEqual, 2, 1)            \
-  F(Int16x8GreaterThan, 2, 1)                \
-  F(Int16x8GreaterThanOrEqual, 2, 1)         \
-  F(Int16x8Select, 3, 1)                     \
-  F(Int16x8Swizzle, 9, 1)                    \
-  F(Int16x8Shuffle, 10, 1)                   \
-  F(Int16x8FromUint16x8, 1, 1)               \
-  F(Int16x8FromFloat32x4Bits, 1, 1)          \
-  F(Int16x8FromInt32x4Bits, 1, 1)            \
-  F(Int16x8FromUint32x4Bits, 1, 1)           \
-  F(Int16x8FromUint16x8Bits, 1, 1)           \
-  F(Int16x8FromInt8x16Bits, 1, 1)            \
-  F(Int16x8FromUint8x16Bits, 1, 1)           \
-  F(Int16x8Load, 2, 1)                       \
-  F(Int16x8Store, 3, 1)                      \
-  F(Uint16x8Check, 1, 1)                     \
-  F(Uint16x8ExtractLane, 2, 1)               \
-  F(Uint16x8ReplaceLane, 3, 1)               \
-  F(Uint16x8Add, 2, 1)                       \
-  F(Uint16x8AddSaturate, 2, 1)               \
-  F(Uint16x8Sub, 2, 1)                       \
-  F(Uint16x8SubSaturate, 2, 1)               \
-  F(Uint16x8Mul, 2, 1)                       \
-  F(Uint16x8Min, 2, 1)                       \
-  F(Uint16x8Max, 2, 1)                       \
-  F(Uint16x8And, 2, 1)                       \
-  F(Uint16x8Or, 2, 1)                        \
-  F(Uint16x8Xor, 2, 1)                       \
-  F(Uint16x8Not, 1, 1)                       \
-  F(Uint16x8ShiftLeftByScalar, 2, 1)         \
-  F(Uint16x8ShiftRightByScalar, 2, 1)        \
-  F(Uint16x8Equal, 2, 1)                     \
-  F(Uint16x8NotEqual, 2, 1)                  \
-  F(Uint16x8LessThan, 2, 1)                  \
-  F(Uint16x8LessThanOrEqual, 2, 1)           \
-  F(Uint16x8GreaterThan, 2, 1)               \
-  F(Uint16x8GreaterThanOrEqual, 2, 1)        \
-  F(Uint16x8Select, 3, 1)                    \
-  F(Uint16x8Swizzle, 9, 1)                   \
-  F(Uint16x8Shuffle, 10, 1)                  \
-  F(Uint16x8FromInt16x8, 1, 1)               \
-  F(Uint16x8FromFloat32x4Bits, 1, 1)         \
-  F(Uint16x8FromInt32x4Bits, 1, 1)           \
-  F(Uint16x8FromUint32x4Bits, 1, 1)          \
-  F(Uint16x8FromInt16x8Bits, 1, 1)           \
-  F(Uint16x8FromInt8x16Bits, 1, 1)           \
-  F(Uint16x8FromUint8x16Bits, 1, 1)          \
-  F(Uint16x8Load, 2, 1)                      \
-  F(Uint16x8Store, 3, 1)                     \
-  F(Bool16x8Check, 1, 1)                     \
-  F(Bool16x8ExtractLane, 2, 1)               \
-  F(Bool16x8ReplaceLane, 3, 1)               \
-  F(Bool16x8And, 2, 1)                       \
-  F(Bool16x8Or, 2, 1)                        \
-  F(Bool16x8Xor, 2, 1)                       \
-  F(Bool16x8Not, 1, 1)                       \
-  F(Bool16x8AnyTrue, 1, 1)                   \
-  F(Bool16x8AllTrue, 1, 1)                   \
-  F(Bool16x8Swizzle, 9, 1)                   \
-  F(Bool16x8Shuffle, 10, 1)                  \
-  F(Int8x16Check, 1, 1)                      \
-  F(Int8x16ExtractLane, 2, 1)                \
-  F(Int8x16ReplaceLane, 3, 1)                \
-  F(Int8x16Neg, 1, 1)                        \
-  F(Int8x16Add, 2, 1)                        \
-  F(Int8x16AddSaturate, 2, 1)                \
-  F(Int8x16Sub, 2, 1)                        \
-  F(Int8x16SubSaturate, 2, 1)                \
-  F(Int8x16Mul, 2, 1)                        \
-  F(Int8x16Min, 2, 1)                        \
-  F(Int8x16Max, 2, 1)                        \
-  F(Int8x16And, 2, 1)                        \
-  F(Int8x16Or, 2, 1)                         \
-  F(Int8x16Xor, 2, 1)                        \
-  F(Int8x16Not, 1, 1)                        \
-  F(Int8x16ShiftLeftByScalar, 2, 1)          \
-  F(Int8x16ShiftRightByScalar, 2, 1)         \
-  F(Int8x16Equal, 2, 1)                      \
-  F(Int8x16NotEqual, 2, 1)                   \
-  F(Int8x16LessThan, 2, 1)                   \
-  F(Int8x16LessThanOrEqual, 2, 1)            \
-  F(Int8x16GreaterThan, 2, 1)                \
-  F(Int8x16GreaterThanOrEqual, 2, 1)         \
-  F(Int8x16Select, 3, 1)                     \
-  F(Int8x16Swizzle, 17, 1)                   \
-  F(Int8x16Shuffle, 18, 1)                   \
-  F(Int8x16FromUint8x16, 1, 1)               \
-  F(Int8x16FromFloat32x4Bits, 1, 1)          \
-  F(Int8x16FromInt32x4Bits, 1, 1)            \
-  F(Int8x16FromUint32x4Bits, 1, 1)           \
-  F(Int8x16FromInt16x8Bits, 1, 1)            \
-  F(Int8x16FromUint16x8Bits, 1, 1)           \
-  F(Int8x16FromUint8x16Bits, 1, 1)           \
-  F(Int8x16Load, 2, 1)                       \
-  F(Int8x16Store, 3, 1)                      \
-  F(Uint8x16Check, 1, 1)                     \
-  F(Uint8x16ExtractLane, 2, 1)               \
-  F(Uint8x16ReplaceLane, 3, 1)               \
-  F(Uint8x16Add, 2, 1)                       \
-  F(Uint8x16AddSaturate, 2, 1)               \
-  F(Uint8x16Sub, 2, 1)                       \
-  F(Uint8x16SubSaturate, 2, 1)               \
-  F(Uint8x16Mul, 2, 1)                       \
-  F(Uint8x16Min, 2, 1)                       \
-  F(Uint8x16Max, 2, 1)                       \
-  F(Uint8x16And, 2, 1)                       \
-  F(Uint8x16Or, 2, 1)                        \
-  F(Uint8x16Xor, 2, 1)                       \
-  F(Uint8x16Not, 1, 1)                       \
-  F(Uint8x16ShiftLeftByScalar, 2, 1)         \
-  F(Uint8x16ShiftRightByScalar, 2, 1)        \
-  F(Uint8x16Equal, 2, 1)                     \
-  F(Uint8x16NotEqual, 2, 1)                  \
-  F(Uint8x16LessThan, 2, 1)                  \
-  F(Uint8x16LessThanOrEqual, 2, 1)           \
-  F(Uint8x16GreaterThan, 2, 1)               \
-  F(Uint8x16GreaterThanOrEqual, 2, 1)        \
-  F(Uint8x16Select, 3, 1)                    \
-  F(Uint8x16Swizzle, 17, 1)                  \
-  F(Uint8x16Shuffle, 18, 1)                  \
-  F(Uint8x16FromInt8x16, 1, 1)               \
-  F(Uint8x16FromFloat32x4Bits, 1, 1)         \
-  F(Uint8x16FromInt32x4Bits, 1, 1)           \
-  F(Uint8x16FromUint32x4Bits, 1, 1)          \
-  F(Uint8x16FromInt16x8Bits, 1, 1)           \
-  F(Uint8x16FromUint16x8Bits, 1, 1)          \
-  F(Uint8x16FromInt8x16Bits, 1, 1)           \
-  F(Uint8x16Load, 2, 1)                      \
-  F(Uint8x16Store, 3, 1)                     \
-  F(Bool8x16Check, 1, 1)                     \
-  F(Bool8x16ExtractLane, 2, 1)               \
-  F(Bool8x16ReplaceLane, 3, 1)               \
-  F(Bool8x16And, 2, 1)                       \
-  F(Bool8x16Or, 2, 1)                        \
-  F(Bool8x16Xor, 2, 1)                       \
-  F(Bool8x16Not, 1, 1)                       \
-  F(Bool8x16AnyTrue, 1, 1)                   \
-  F(Bool8x16AllTrue, 1, 1)                   \
-  F(Bool8x16Swizzle, 17, 1)                  \
-  F(Bool8x16Shuffle, 18, 1)
+#define FOR_EACH_INTRINSIC_SIMD(F)     \
+  F(IsSimdValue, 1, 1)                 \
+  F(CreateFloat32x4, 4, 1)             \
+  F(CreateInt32x4, 4, 1)               \
+  F(CreateUint32x4, 4, 1)              \
+  F(CreateBool32x4, 4, 1)              \
+  F(CreateInt16x8, 8, 1)               \
+  F(CreateUint16x8, 8, 1)              \
+  F(CreateBool16x8, 8, 1)              \
+  F(CreateInt8x16, 16, 1)              \
+  F(CreateUint8x16, 16, 1)             \
+  F(CreateBool8x16, 16, 1)             \
+  F(Float32x4Check, 1, 1)              \
+  F(Float32x4ExtractLane, 2, 1)        \
+  F(Float32x4ReplaceLane, 3, 1)        \
+  F(Float32x4Abs, 1, 1)                \
+  F(Float32x4Neg, 1, 1)                \
+  F(Float32x4Sqrt, 1, 1)               \
+  F(Float32x4RecipApprox, 1, 1)        \
+  F(Float32x4RecipSqrtApprox, 1, 1)    \
+  F(Float32x4Add, 2, 1)                \
+  F(Float32x4Sub, 2, 1)                \
+  F(Float32x4Mul, 2, 1)                \
+  F(Float32x4Div, 2, 1)                \
+  F(Float32x4Min, 2, 1)                \
+  F(Float32x4Max, 2, 1)                \
+  F(Float32x4MinNum, 2, 1)             \
+  F(Float32x4MaxNum, 2, 1)             \
+  F(Float32x4Equal, 2, 1)              \
+  F(Float32x4NotEqual, 2, 1)           \
+  F(Float32x4LessThan, 2, 1)           \
+  F(Float32x4LessThanOrEqual, 2, 1)    \
+  F(Float32x4GreaterThan, 2, 1)        \
+  F(Float32x4GreaterThanOrEqual, 2, 1) \
+  F(Float32x4Select, 3, 1)             \
+  F(Float32x4Swizzle, 5, 1)            \
+  F(Float32x4Shuffle, 6, 1)            \
+  F(Float32x4FromInt32x4, 1, 1)        \
+  F(Float32x4FromUint32x4, 1, 1)       \
+  F(Float32x4FromInt32x4Bits, 1, 1)    \
+  F(Float32x4FromUint32x4Bits, 1, 1)   \
+  F(Float32x4FromInt16x8Bits, 1, 1)    \
+  F(Float32x4FromUint16x8Bits, 1, 1)   \
+  F(Float32x4FromInt8x16Bits, 1, 1)    \
+  F(Float32x4FromUint8x16Bits, 1, 1)   \
+  F(Float32x4Load, 2, 1)               \
+  F(Float32x4Load1, 2, 1)              \
+  F(Float32x4Load2, 2, 1)              \
+  F(Float32x4Load3, 2, 1)              \
+  F(Float32x4Store, 3, 1)              \
+  F(Float32x4Store1, 3, 1)             \
+  F(Float32x4Store2, 3, 1)             \
+  F(Float32x4Store3, 3, 1)             \
+  F(Int32x4Check, 1, 1)                \
+  F(Int32x4ExtractLane, 2, 1)          \
+  F(Int32x4ReplaceLane, 3, 1)          \
+  F(Int32x4Neg, 1, 1)                  \
+  F(Int32x4Add, 2, 1)                  \
+  F(Int32x4Sub, 2, 1)                  \
+  F(Int32x4Mul, 2, 1)                  \
+  F(Int32x4Min, 2, 1)                  \
+  F(Int32x4Max, 2, 1)                  \
+  F(Int32x4And, 2, 1)                  \
+  F(Int32x4Or, 2, 1)                   \
+  F(Int32x4Xor, 2, 1)                  \
+  F(Int32x4Not, 1, 1)                  \
+  F(Int32x4ShiftLeftByScalar, 2, 1)    \
+  F(Int32x4ShiftRightByScalar, 2, 1)   \
+  F(Int32x4Equal, 2, 1)                \
+  F(Int32x4NotEqual, 2, 1)             \
+  F(Int32x4LessThan, 2, 1)             \
+  F(Int32x4LessThanOrEqual, 2, 1)      \
+  F(Int32x4GreaterThan, 2, 1)          \
+  F(Int32x4GreaterThanOrEqual, 2, 1)   \
+  F(Int32x4Select, 3, 1)               \
+  F(Int32x4Swizzle, 5, 1)              \
+  F(Int32x4Shuffle, 6, 1)              \
+  F(Int32x4FromFloat32x4, 1, 1)        \
+  F(Int32x4FromUint32x4, 1, 1)         \
+  F(Int32x4FromFloat32x4Bits, 1, 1)    \
+  F(Int32x4FromUint32x4Bits, 1, 1)     \
+  F(Int32x4FromInt16x8Bits, 1, 1)      \
+  F(Int32x4FromUint16x8Bits, 1, 1)     \
+  F(Int32x4FromInt8x16Bits, 1, 1)      \
+  F(Int32x4FromUint8x16Bits, 1, 1)     \
+  F(Int32x4Load, 2, 1)                 \
+  F(Int32x4Load1, 2, 1)                \
+  F(Int32x4Load2, 2, 1)                \
+  F(Int32x4Load3, 2, 1)                \
+  F(Int32x4Store, 3, 1)                \
+  F(Int32x4Store1, 3, 1)               \
+  F(Int32x4Store2, 3, 1)               \
+  F(Int32x4Store3, 3, 1)               \
+  F(Uint32x4Check, 1, 1)               \
+  F(Uint32x4ExtractLane, 2, 1)         \
+  F(Uint32x4ReplaceLane, 3, 1)         \
+  F(Uint32x4Add, 2, 1)                 \
+  F(Uint32x4Sub, 2, 1)                 \
+  F(Uint32x4Mul, 2, 1)                 \
+  F(Uint32x4Min, 2, 1)                 \
+  F(Uint32x4Max, 2, 1)                 \
+  F(Uint32x4And, 2, 1)                 \
+  F(Uint32x4Or, 2, 1)                  \
+  F(Uint32x4Xor, 2, 1)                 \
+  F(Uint32x4Not, 1, 1)                 \
+  F(Uint32x4ShiftLeftByScalar, 2, 1)   \
+  F(Uint32x4ShiftRightByScalar, 2, 1)  \
+  F(Uint32x4Equal, 2, 1)               \
+  F(Uint32x4NotEqual, 2, 1)            \
+  F(Uint32x4LessThan, 2, 1)            \
+  F(Uint32x4LessThanOrEqual, 2, 1)     \
+  F(Uint32x4GreaterThan, 2, 1)         \
+  F(Uint32x4GreaterThanOrEqual, 2, 1)  \
+  F(Uint32x4Select, 3, 1)              \
+  F(Uint32x4Swizzle, 5, 1)             \
+  F(Uint32x4Shuffle, 6, 1)             \
+  F(Uint32x4FromFloat32x4, 1, 1)       \
+  F(Uint32x4FromInt32x4, 1, 1)         \
+  F(Uint32x4FromFloat32x4Bits, 1, 1)   \
+  F(Uint32x4FromInt32x4Bits, 1, 1)     \
+  F(Uint32x4FromInt16x8Bits, 1, 1)     \
+  F(Uint32x4FromUint16x8Bits, 1, 1)    \
+  F(Uint32x4FromInt8x16Bits, 1, 1)     \
+  F(Uint32x4FromUint8x16Bits, 1, 1)    \
+  F(Uint32x4Load, 2, 1)                \
+  F(Uint32x4Load1, 2, 1)               \
+  F(Uint32x4Load2, 2, 1)               \
+  F(Uint32x4Load3, 2, 1)               \
+  F(Uint32x4Store, 3, 1)               \
+  F(Uint32x4Store1, 3, 1)              \
+  F(Uint32x4Store2, 3, 1)              \
+  F(Uint32x4Store3, 3, 1)              \
+  F(Bool32x4Check, 1, 1)               \
+  F(Bool32x4ExtractLane, 2, 1)         \
+  F(Bool32x4ReplaceLane, 3, 1)         \
+  F(Bool32x4And, 2, 1)                 \
+  F(Bool32x4Or, 2, 1)                  \
+  F(Bool32x4Xor, 2, 1)                 \
+  F(Bool32x4Not, 1, 1)                 \
+  F(Bool32x4AnyTrue, 1, 1)             \
+  F(Bool32x4AllTrue, 1, 1)             \
+  F(Bool32x4Swizzle, 5, 1)             \
+  F(Bool32x4Shuffle, 6, 1)             \
+  F(Bool32x4Equal, 2, 1)               \
+  F(Bool32x4NotEqual, 2, 1)            \
+  F(Int16x8Check, 1, 1)                \
+  F(Int16x8ExtractLane, 2, 1)          \
+  F(Int16x8ReplaceLane, 3, 1)          \
+  F(Int16x8Neg, 1, 1)                  \
+  F(Int16x8Add, 2, 1)                  \
+  F(Int16x8AddSaturate, 2, 1)          \
+  F(Int16x8Sub, 2, 1)                  \
+  F(Int16x8SubSaturate, 2, 1)          \
+  F(Int16x8Mul, 2, 1)                  \
+  F(Int16x8Min, 2, 1)                  \
+  F(Int16x8Max, 2, 1)                  \
+  F(Int16x8And, 2, 1)                  \
+  F(Int16x8Or, 2, 1)                   \
+  F(Int16x8Xor, 2, 1)                  \
+  F(Int16x8Not, 1, 1)                  \
+  F(Int16x8ShiftLeftByScalar, 2, 1)    \
+  F(Int16x8ShiftRightByScalar, 2, 1)   \
+  F(Int16x8Equal, 2, 1)                \
+  F(Int16x8NotEqual, 2, 1)             \
+  F(Int16x8LessThan, 2, 1)             \
+  F(Int16x8LessThanOrEqual, 2, 1)      \
+  F(Int16x8GreaterThan, 2, 1)          \
+  F(Int16x8GreaterThanOrEqual, 2, 1)   \
+  F(Int16x8Select, 3, 1)               \
+  F(Int16x8Swizzle, 9, 1)              \
+  F(Int16x8Shuffle, 10, 1)             \
+  F(Int16x8FromUint16x8, 1, 1)         \
+  F(Int16x8FromFloat32x4Bits, 1, 1)    \
+  F(Int16x8FromInt32x4Bits, 1, 1)      \
+  F(Int16x8FromUint32x4Bits, 1, 1)     \
+  F(Int16x8FromUint16x8Bits, 1, 1)     \
+  F(Int16x8FromInt8x16Bits, 1, 1)      \
+  F(Int16x8FromUint8x16Bits, 1, 1)     \
+  F(Int16x8Load, 2, 1)                 \
+  F(Int16x8Store, 3, 1)                \
+  F(Uint16x8Check, 1, 1)               \
+  F(Uint16x8ExtractLane, 2, 1)         \
+  F(Uint16x8ReplaceLane, 3, 1)         \
+  F(Uint16x8Add, 2, 1)                 \
+  F(Uint16x8AddSaturate, 2, 1)         \
+  F(Uint16x8Sub, 2, 1)                 \
+  F(Uint16x8SubSaturate, 2, 1)         \
+  F(Uint16x8Mul, 2, 1)                 \
+  F(Uint16x8Min, 2, 1)                 \
+  F(Uint16x8Max, 2, 1)                 \
+  F(Uint16x8And, 2, 1)                 \
+  F(Uint16x8Or, 2, 1)                  \
+  F(Uint16x8Xor, 2, 1)                 \
+  F(Uint16x8Not, 1, 1)                 \
+  F(Uint16x8ShiftLeftByScalar, 2, 1)   \
+  F(Uint16x8ShiftRightByScalar, 2, 1)  \
+  F(Uint16x8Equal, 2, 1)               \
+  F(Uint16x8NotEqual, 2, 1)            \
+  F(Uint16x8LessThan, 2, 1)            \
+  F(Uint16x8LessThanOrEqual, 2, 1)     \
+  F(Uint16x8GreaterThan, 2, 1)         \
+  F(Uint16x8GreaterThanOrEqual, 2, 1)  \
+  F(Uint16x8Select, 3, 1)              \
+  F(Uint16x8Swizzle, 9, 1)             \
+  F(Uint16x8Shuffle, 10, 1)            \
+  F(Uint16x8FromInt16x8, 1, 1)         \
+  F(Uint16x8FromFloat32x4Bits, 1, 1)   \
+  F(Uint16x8FromInt32x4Bits, 1, 1)     \
+  F(Uint16x8FromUint32x4Bits, 1, 1)    \
+  F(Uint16x8FromInt16x8Bits, 1, 1)     \
+  F(Uint16x8FromInt8x16Bits, 1, 1)     \
+  F(Uint16x8FromUint8x16Bits, 1, 1)    \
+  F(Uint16x8Load, 2, 1)                \
+  F(Uint16x8Store, 3, 1)               \
+  F(Bool16x8Check, 1, 1)               \
+  F(Bool16x8ExtractLane, 2, 1)         \
+  F(Bool16x8ReplaceLane, 3, 1)         \
+  F(Bool16x8And, 2, 1)                 \
+  F(Bool16x8Or, 2, 1)                  \
+  F(Bool16x8Xor, 2, 1)                 \
+  F(Bool16x8Not, 1, 1)                 \
+  F(Bool16x8AnyTrue, 1, 1)             \
+  F(Bool16x8AllTrue, 1, 1)             \
+  F(Bool16x8Swizzle, 9, 1)             \
+  F(Bool16x8Shuffle, 10, 1)            \
+  F(Bool16x8Equal, 2, 1)               \
+  F(Bool16x8NotEqual, 2, 1)            \
+  F(Int8x16Check, 1, 1)                \
+  F(Int8x16ExtractLane, 2, 1)          \
+  F(Int8x16ReplaceLane, 3, 1)          \
+  F(Int8x16Neg, 1, 1)                  \
+  F(Int8x16Add, 2, 1)                  \
+  F(Int8x16AddSaturate, 2, 1)          \
+  F(Int8x16Sub, 2, 1)                  \
+  F(Int8x16SubSaturate, 2, 1)          \
+  F(Int8x16Mul, 2, 1)                  \
+  F(Int8x16Min, 2, 1)                  \
+  F(Int8x16Max, 2, 1)                  \
+  F(Int8x16And, 2, 1)                  \
+  F(Int8x16Or, 2, 1)                   \
+  F(Int8x16Xor, 2, 1)                  \
+  F(Int8x16Not, 1, 1)                  \
+  F(Int8x16ShiftLeftByScalar, 2, 1)    \
+  F(Int8x16ShiftRightByScalar, 2, 1)   \
+  F(Int8x16Equal, 2, 1)                \
+  F(Int8x16NotEqual, 2, 1)             \
+  F(Int8x16LessThan, 2, 1)             \
+  F(Int8x16LessThanOrEqual, 2, 1)      \
+  F(Int8x16GreaterThan, 2, 1)          \
+  F(Int8x16GreaterThanOrEqual, 2, 1)   \
+  F(Int8x16Select, 3, 1)               \
+  F(Int8x16Swizzle, 17, 1)             \
+  F(Int8x16Shuffle, 18, 1)             \
+  F(Int8x16FromUint8x16, 1, 1)         \
+  F(Int8x16FromFloat32x4Bits, 1, 1)    \
+  F(Int8x16FromInt32x4Bits, 1, 1)      \
+  F(Int8x16FromUint32x4Bits, 1, 1)     \
+  F(Int8x16FromInt16x8Bits, 1, 1)      \
+  F(Int8x16FromUint16x8Bits, 1, 1)     \
+  F(Int8x16FromUint8x16Bits, 1, 1)     \
+  F(Int8x16Load, 2, 1)                 \
+  F(Int8x16Store, 3, 1)                \
+  F(Uint8x16Check, 1, 1)               \
+  F(Uint8x16ExtractLane, 2, 1)         \
+  F(Uint8x16ReplaceLane, 3, 1)         \
+  F(Uint8x16Add, 2, 1)                 \
+  F(Uint8x16AddSaturate, 2, 1)         \
+  F(Uint8x16Sub, 2, 1)                 \
+  F(Uint8x16SubSaturate, 2, 1)         \
+  F(Uint8x16Mul, 2, 1)                 \
+  F(Uint8x16Min, 2, 1)                 \
+  F(Uint8x16Max, 2, 1)                 \
+  F(Uint8x16And, 2, 1)                 \
+  F(Uint8x16Or, 2, 1)                  \
+  F(Uint8x16Xor, 2, 1)                 \
+  F(Uint8x16Not, 1, 1)                 \
+  F(Uint8x16ShiftLeftByScalar, 2, 1)   \
+  F(Uint8x16ShiftRightByScalar, 2, 1)  \
+  F(Uint8x16Equal, 2, 1)               \
+  F(Uint8x16NotEqual, 2, 1)            \
+  F(Uint8x16LessThan, 2, 1)            \
+  F(Uint8x16LessThanOrEqual, 2, 1)     \
+  F(Uint8x16GreaterThan, 2, 1)         \
+  F(Uint8x16GreaterThanOrEqual, 2, 1)  \
+  F(Uint8x16Select, 3, 1)              \
+  F(Uint8x16Swizzle, 17, 1)            \
+  F(Uint8x16Shuffle, 18, 1)            \
+  F(Uint8x16FromInt8x16, 1, 1)         \
+  F(Uint8x16FromFloat32x4Bits, 1, 1)   \
+  F(Uint8x16FromInt32x4Bits, 1, 1)     \
+  F(Uint8x16FromUint32x4Bits, 1, 1)    \
+  F(Uint8x16FromInt16x8Bits, 1, 1)     \
+  F(Uint8x16FromUint16x8Bits, 1, 1)    \
+  F(Uint8x16FromInt8x16Bits, 1, 1)     \
+  F(Uint8x16Load, 2, 1)                \
+  F(Uint8x16Store, 3, 1)               \
+  F(Bool8x16Check, 1, 1)               \
+  F(Bool8x16ExtractLane, 2, 1)         \
+  F(Bool8x16ReplaceLane, 3, 1)         \
+  F(Bool8x16And, 2, 1)                 \
+  F(Bool8x16Or, 2, 1)                  \
+  F(Bool8x16Xor, 2, 1)                 \
+  F(Bool8x16Not, 1, 1)                 \
+  F(Bool8x16AnyTrue, 1, 1)             \
+  F(Bool8x16AllTrue, 1, 1)             \
+  F(Bool8x16Swizzle, 17, 1)            \
+  F(Bool8x16Shuffle, 18, 1)            \
+  F(Bool8x16Equal, 2, 1)               \
+  F(Bool8x16NotEqual, 2, 1)
 
 
 #define FOR_EACH_INTRINSIC_STRINGS(F)     \
@@ -914,7 +894,6 @@
   F(SymbolRegistry, 0, 1)            \
   F(SymbolIsPrivate, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_TEST(F)            \
   F(DeoptimizeFunction, 1, 1)                 \
   F(DeoptimizeNow, 0, 1)                      \
@@ -942,6 +921,7 @@
   F(DisassembleFunction, 1, 1)                \
   F(TraceEnter, 0, 1)                         \
   F(TraceExit, 1, 1)                          \
+  F(TraceTailCall, 0, 1)                      \
   F(HaveSameMap, 2, 1)                        \
   F(InNewSpace, 1, 1)                         \
   F(HasFastSmiElements, 1, 1)                 \
@@ -963,7 +943,6 @@
   F(HasFixedFloat64Elements, 1, 1)            \
   F(HasFixedUint8ClampedElements, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_TYPEDARRAY(F)     \
   F(ArrayBufferGetByteLength, 1, 1)          \
   F(ArrayBufferSliceImpl, 4, 1)              \
@@ -981,7 +960,6 @@
   F(IsSharedTypedArray, 1, 1)                \
   F(IsSharedIntegerTypedArray, 1, 1)         \
   F(IsSharedInteger32TypedArray, 1, 1)       \
-  F(DataViewInitialize, 4, 1)                \
   F(DataViewGetUint8, 3, 1)                  \
   F(DataViewGetInt8, 3, 1)                   \
   F(DataViewGetUint16, 3, 1)                 \
@@ -1004,11 +982,11 @@
   F(URIEscape, 1, 1)              \
   F(URIUnescape, 1, 1)
 
-
 #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
-  F(LoadLookupSlot, 2, 2)                 \
-  F(LoadLookupSlotNoReferenceError, 2, 2)
+  F(LoadLookupSlotForCall, 1, 2)
 
+#define FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
+  F(ForInPrepare, 1, 3)
 
 // Most intrinsics are implemented in the runtime/ directory, but ICs are
 // implemented in ic.cc for now.
@@ -1029,7 +1007,7 @@
   F(LoadIC_MissFromStubFailure, 4, 1)        \
   F(LoadPropertyWithInterceptor, 3, 1)       \
   F(LoadPropertyWithInterceptorOnly, 3, 1)   \
-  F(StoreCallbackProperty, 5, 1)             \
+  F(StoreCallbackProperty, 6, 1)             \
   F(StoreIC_Miss, 5, 1)                      \
   F(StoreIC_MissFromStubFailure, 5, 1)       \
   F(StoreIC_Slow, 5, 1)                      \
@@ -1074,8 +1052,9 @@
 
-// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
-// either returning an object or a pair.
+// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 3 flavors,
+// either returning an object, a pair, or a triple.
-#define FOR_EACH_INTRINSIC(F)       \
-  FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
+#define FOR_EACH_INTRINSIC(F)         \
+  FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
+  FOR_EACH_INTRINSIC_RETURN_PAIR(F)   \
   FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
 
 
@@ -1148,8 +1127,7 @@
       Handle<Object> value, LanguageMode language_mode);
 
   MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
-      Isolate* isolate, Handle<Object> object, Handle<Object> key,
-      LanguageMode language_mode = SLOPPY);
+      Isolate* isolate, Handle<Object> object, Handle<Object> key);
 
   enum TypedArrayId {
     // arrayIds below should be synchronized with typedarray.js natives.
@@ -1203,6 +1181,7 @@
   unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
   unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
 
+
   base::SmartArrayPointer<Runtime::Function> redirected_intrinsic_functions_;
 
   friend class Isolate;
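
Each F(Name, nargs, nresults) entry above is an X-macro: the list is
re-expanded at several sites, each of which defines F to stamp out one enum
value, table row, or declaration per intrinsic. A minimal self-contained
sketch of the pattern (demo names, not V8's actual expansion sites):

  #include <cstdio>

  #define FOR_EACH_DEMO_INTRINSIC(F) \
    F(NewStrictArguments, 1, 1)      \
    F(NewRestParameter, 1, 1)

  // Expand once to build an enum of intrinsic ids...
  #define DECLARE_ID(Name, nargs, nresults) kDemo##Name,
  enum DemoIntrinsicId { FOR_EACH_DEMO_INTRINSIC(DECLARE_ID) kDemoCount };
  #undef DECLARE_ID

  // ...and again to build a parallel table of argument/result counts.
  struct DemoEntry { const char* name; int nargs; int nresults; };
  #define DECLARE_ENTRY(Name, nargs, nresults) {#Name, nargs, nresults},
  static const DemoEntry kDemoTable[] = {
      FOR_EACH_DEMO_INTRINSIC(DECLARE_ENTRY)};
  #undef DECLARE_ENTRY

  int main() {
    for (const DemoEntry& e : kDemoTable)
      std::printf("%s: %d arg(s), %d result(s)\n", e.name, e.nargs,
                  e.nresults);
  }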
diff --git a/src/snapshot/serialize.cc b/src/snapshot/serialize.cc
index 421cf07..4868abd 100644
--- a/src/snapshot/serialize.cc
+++ b/src/snapshot/serialize.cc
@@ -54,8 +54,6 @@
       "StackGuard::address_of_real_jslimit()");
   Add(ExternalReference::new_space_start(isolate).address(),
       "Heap::NewSpaceStart()");
-  Add(ExternalReference::new_space_mask(isolate).address(),
-      "Heap::NewSpaceMask()");
   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
       "Heap::NewSpaceAllocationLimitAddress()");
   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
@@ -82,6 +80,8 @@
   Add(ExternalReference::address_of_one_half().address(),
       "LDoubleConstant::one_half");
   Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+  Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
+      "Interpreter::dispatch_table_address");
   Add(ExternalReference::address_of_negative_infinity().address(),
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
@@ -119,6 +119,22 @@
       "InvokeFunctionCallback");
   Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
       "InvokeAccessorGetterCallback");
+  Add(ExternalReference::f32_trunc_wrapper_function(isolate).address(),
+      "f32_trunc_wrapper");
+  Add(ExternalReference::f32_floor_wrapper_function(isolate).address(),
+      "f32_floor_wrapper");
+  Add(ExternalReference::f32_ceil_wrapper_function(isolate).address(),
+      "f32_ceil_wrapper");
+  Add(ExternalReference::f32_nearest_int_wrapper_function(isolate).address(),
+      "f32_nearest_int_wrapper");
+  Add(ExternalReference::f64_trunc_wrapper_function(isolate).address(),
+      "f64_trunc_wrapper");
+  Add(ExternalReference::f64_floor_wrapper_function(isolate).address(),
+      "f64_floor_wrapper");
+  Add(ExternalReference::f64_ceil_wrapper_function(isolate).address(),
+      "f64_ceil_wrapper");
+  Add(ExternalReference::f64_nearest_int_wrapper_function(isolate).address(),
+      "f64_nearest_int_wrapper");
   Add(ExternalReference::log_enter_external_function(isolate).address(),
       "Logger::EnterExternal");
   Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -268,9 +284,14 @@
   static const AccessorRefTable accessors[] = {
 #define ACCESSOR_INFO_DECLARATION(name)                                     \
   { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
-  , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"},
+  ,
       ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_SETTER_DECLARATION(name)                  \
+  { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name } \
+  ,
+          ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_SETTER_DECLARATION
   };
 
   for (unsigned i = 0; i < arraysize(accessors); ++i) {
@@ -299,6 +320,10 @@
   Add(ExternalReference::incremental_marking_record_write_function(isolate)
           .address(),
       "IncrementalMarking::RecordWrite");
+  Add(ExternalReference::incremental_marking_record_write_code_entry_function(
+          isolate)
+          .address(),
+      "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
   Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
       "StoreBuffer::StoreBufferOverflow");
 
@@ -622,6 +647,10 @@
   ReadData(start, end, NEW_SPACE, NULL);
 }
 
+void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+  static const byte expected = kSynchronize;
+  CHECK_EQ(expected, source_.Get());
+}
 
 void Deserializer::DeserializeDeferredObjects() {
   for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
@@ -985,9 +1014,11 @@
     }                                                                          \
     if (emit_write_barrier && write_barrier_needed) {                          \
       Address current_address = reinterpret_cast<Address>(current);            \
+      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));      \
       isolate->heap()->RecordWrite(                                            \
-          current_object_address,                                              \
-          static_cast<int>(current_address - current_object_address));         \
+          HeapObject::FromAddress(current_object_address),                     \
+          static_cast<int>(current_address - current_object_address),          \
+          *reinterpret_cast<Object**>(current_address));                       \
     }                                                                          \
     if (!current_was_incremented) {                                            \
       current++;                                                               \
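
The new RecordWrite signature here and in the hunk below receives the host
object plus the value just written, so the barrier can filter on the stored
value without re-reading the slot. A hedged sketch of the generational-barrier
idea behind these call sites, using simplified stand-in types rather than V8's
Heap and remembered set:

  #include <cstdint>
  #include <set>

  // Simplified heap bookkeeping; these shapes are assumptions for
  // illustration only.
  struct Heap {
    uintptr_t new_space_start = 0, new_space_end = 0;
    std::set<void**> remembered_set;  // slots holding old-to-new pointers
    bool InNewSpace(const void* p) const {
      uintptr_t a = reinterpret_cast<uintptr_t>(p);
      return a >= new_space_start && a < new_space_end;
    }
  };

  // Only old-space slots that now hold new-space objects need remembering.
  void RecordWrite(Heap* heap, char* host, int offset, void* value) {
    if (!heap->InNewSpace(value)) return;  // nothing young was stored
    if (heap->InNewSpace(host)) return;    // young hosts are scavenged anyway
    heap->remembered_set.insert(reinterpret_cast<void**>(host + offset));
  }

  int main() {
    Heap heap;
    char old_obj[64] = {};
    char young_obj[16] = {};
    heap.new_space_start = reinterpret_cast<uintptr_t>(young_obj);
    heap.new_space_end = heap.new_space_start + sizeof(young_obj);
    RecordWrite(&heap, old_obj, 8, young_obj);  // old -> new: recorded
    RecordWrite(&heap, old_obj, 16, old_obj);   // old -> old: skipped
    return heap.remembered_set.size() == 1 ? 0 : 1;
  }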
@@ -1219,11 +1250,13 @@
         int index = data & kHotObjectMask;
         Object* hot_object = hot_objects_.Get(index);
         UnalignedCopy(current, &hot_object);
-        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
+        if (write_barrier_needed) {
           Address current_address = reinterpret_cast<Address>(current);
+          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
           isolate->heap()->RecordWrite(
-              current_object_address,
-              static_cast<int>(current_address - current_object_address));
+              HeapObject::FromAddress(current_object_address),
+              static_cast<int>(current_address - current_object_address),
+              hot_object);
         }
         current++;
         break;
@@ -1567,7 +1600,7 @@
   // would cause dupes.
   DCHECK(!o->IsScript());
   return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
-         o->IsCode() || o->IsScopeInfo() || o->IsExecutableAccessorInfo() ||
+         o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
          o->map() ==
              startup_serializer_->isolate()->heap()->fixed_cow_array_map();
 }
@@ -1655,9 +1688,10 @@
   return false;
 }
 
-
 StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
-    : Serializer(isolate, sink), root_index_wave_front_(0) {
+    : Serializer(isolate, sink),
+      root_index_wave_front_(0),
+      serializing_builtins_(false) {
   // Clear the cache of objects used by the partial snapshot.  After the
   // strong roots have been serialized we can create a partial snapshot
   // which will repopulate the cache with objects needed by that partial
@@ -1671,17 +1705,30 @@
                                         WhereToPoint where_to_point, int skip) {
   DCHECK(!obj->IsJSFunction());
 
-  int root_index = root_index_map_.Lookup(obj);
-  // We can only encode roots as such if it has already been serialized.
-  // That applies to root indices below the wave front.
-  if (root_index != RootIndexMap::kInvalidRootIndex &&
-      root_index < root_index_wave_front_) {
-    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
-    return;
+  if (obj->IsCode()) {
+    Code* code = Code::cast(obj);
+    // If the function code is compiled (either as native code or bytecode),
+    // replace it with the lazy-compile builtin. The only exception is when
+    // we are serializing the canonical interpreter-entry-trampoline builtin.
+    if (code->kind() == Code::FUNCTION ||
+        (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+      obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+    }
+  } else if (obj->IsBytecodeArray()) {
+    obj = isolate()->heap()->undefined_value();
   }
 
-  if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
-    obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+  int root_index = root_index_map_.Lookup(obj);
+  bool is_immortal_immovable_root = false;
+  // We can only encode a root as such if it has already been serialized.
+  // That applies to root indices below the wave front.
+  if (root_index != RootIndexMap::kInvalidRootIndex) {
+    if (root_index < root_index_wave_front_) {
+      PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+      return;
+    } else {
+      is_immortal_immovable_root = Heap::RootIsImmortalImmovable(root_index);
+    }
   }
 
   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
@@ -1692,6 +1739,14 @@
   ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
                                      where_to_point);
   object_serializer.Serialize();
+
+  if (is_immortal_immovable_root) {
+    // Make sure that the immortal immovable root has been included in the first
+    // chunk of its reserved space, so that it is deserialized onto the first
+    // page of its space and stays immortal immovable.
+    BackReference ref = back_reference_map_.Lookup(obj);
+    CHECK(ref.is_valid() && ref.chunk_index() == 0);
+  }
 }
 
 
@@ -1708,6 +1763,12 @@
   Pad();
 }
 
+void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+  // We expect the builtins tag after builtins have been serialized.
+  DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
+  serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
+  sink_->Put(kSynchronize, "Synchronize");
+}
 
 void Serializer::PutRoot(int root_index,
                          HeapObject* object,
@@ -1911,24 +1972,36 @@
   sink_->PutInt(bytes_to_output, "SkipDistance");
 }
 
-
-// Clear and later restore the next link in the weak cell, if the object is one.
-class UnlinkWeakCellScope {
+// Clear and later restore the next link in the weak cell or allocation site.
+// TODO(all): replace this with proper iteration of weak slots in serializer.
+class UnlinkWeakNextScope {
  public:
-  explicit UnlinkWeakCellScope(HeapObject* object) : weak_cell_(NULL) {
+  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
     if (object->IsWeakCell()) {
-      weak_cell_ = WeakCell::cast(object);
-      next_ = weak_cell_->next();
-      weak_cell_->clear_next(object->GetHeap()->the_hole_value());
+      object_ = object;
+      next_ = WeakCell::cast(object)->next();
+      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
+    } else if (object->IsAllocationSite()) {
+      object_ = object;
+      next_ = AllocationSite::cast(object)->weak_next();
+      AllocationSite::cast(object)
+          ->set_weak_next(object->GetHeap()->undefined_value());
     }
   }
 
-  ~UnlinkWeakCellScope() {
-    if (weak_cell_) weak_cell_->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+  ~UnlinkWeakNextScope() {
+    if (object_ != nullptr) {
+      if (object_->IsWeakCell()) {
+        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+      } else {
+        AllocationSite::cast(object_)
+            ->set_weak_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+      }
+    }
   }
 
  private:
-  WeakCell* weak_cell_;
+  HeapObject* object_;
   Object* next_;
   DisallowHeapAllocation no_gc_;
 };
@@ -1986,7 +2059,7 @@
     return;
   }
 
-  UnlinkWeakCellScope unlink_weak_cell(object_);
+  UnlinkWeakNextScope unlink_weak_next(object_);
 
   object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
@@ -2013,7 +2086,7 @@
   serializer_->PutBackReference(object_, reference);
   sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
 
-  UnlinkWeakCellScope unlink_weak_cell(object_);
+  UnlinkWeakNextScope unlink_weak_next(object_);
 
   object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
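
UnlinkWeakNextScope above is the usual RAII save/patch/restore shape: the
constructor hides a weak link so the serializer never follows it, and the
destructor reinstates the link on every exit path. A condensed illustration
with an invented node type:

  #include <cassert>

  struct WeakishNode {
    WeakishNode* next = nullptr;  // weak link that must not be serialized
  };

  // Detach the weak link for the guard's lifetime; restore it on scope exit.
  class UnlinkNextScope {
   public:
    explicit UnlinkNextScope(WeakishNode* node)
        : node_(node), saved_next_(node->next) {
      node_->next = nullptr;  // hide the link from code running in the scope
    }
    ~UnlinkNextScope() { node_->next = saved_next_; }  // always restored

   private:
    WeakishNode* node_;
    WeakishNode* saved_next_;
  };

  int main() {
    WeakishNode a, b;
    a.next = &b;
    {
      UnlinkNextScope guard(&a);
      assert(a.next == nullptr);  // link hidden while "serializing"
    }
    assert(a.next == &b);  // restored afterwards
  }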
diff --git a/src/snapshot/serialize.h b/src/snapshot/serialize.h
index 7f4676e..f7420ef 100644
--- a/src/snapshot/serialize.h
+++ b/src/snapshot/serialize.h
@@ -383,6 +383,8 @@
  private:
   void VisitPointers(Object** start, Object** end) override;
 
+  void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
   void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
 
   void Initialize(Isolate* isolate);
@@ -471,7 +473,6 @@
  public:
   Serializer(Isolate* isolate, SnapshotByteSink* sink);
   ~Serializer() override;
-  void VisitPointers(Object** start, Object** end) override;
 
   void EncodeReservations(List<SerializedData::Reservation>* out) const;
 
@@ -578,6 +579,8 @@
   friend class SnapshotData;
 
  private:
+  void VisitPointers(Object** start, Object** end) override;
+
   CodeAddressMap* code_address_map_;
   // Objects from the same space are put into chunks for bulk-allocation
   // when deserializing. We have to make sure that each chunk fits into a
@@ -620,10 +623,11 @@
 
   // Serialize the objects reachable from a single object pointer.
   void Serialize(Object** o);
+
+ private:
   void SerializeObject(HeapObject* o, HowToCode how_to_code,
                        WhereToPoint where_to_point, int skip) override;
 
- private:
   int PartialSnapshotCacheIndex(HeapObject* o);
   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
 
@@ -639,21 +643,23 @@
   StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
   ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
 
-  // The StartupSerializer has to serialize the root array, which is slightly
-  // different.
-  void VisitPointers(Object** start, Object** end) override;
-
   // Serialize the current state of the heap.  The order is:
   // 1) Strong references.
   // 2) Partial snapshot cache.
   // 3) Weak references (e.g. the string table).
-  virtual void SerializeStrongReferences();
-  void SerializeObject(HeapObject* o, HowToCode how_to_code,
-                       WhereToPoint where_to_point, int skip) override;
+  void SerializeStrongReferences();
   void SerializeWeakReferencesAndDeferred();
 
  private:
+  // The StartupSerializer has to serialize the root array, which is slightly
+  // different.
+  void VisitPointers(Object** start, Object** end) override;
+  void SerializeObject(HeapObject* o, HowToCode how_to_code,
+                       WhereToPoint where_to_point, int skip) override;
+  void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
   intptr_t root_index_wave_front_;
+  bool serializing_builtins_;
   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
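
Several overrides in this header move from public to private without changing
behavior: C++ checks access against the static type used at the call site, so
virtual dispatch through the base class still reaches a private override,
while direct calls on the subclass stop compiling. A small stand-alone
illustration with invented names:

  #include <iostream>

  class Visitor {
   public:
    virtual ~Visitor() = default;
    void Run() { Visit(); }  // public entry point, dispatches virtually

   protected:
    virtual void Visit() { std::cout << "base\n"; }
  };

  class StartupVisitor : public Visitor {
   private:
    // Narrowed to private, yet still reached via dynamic dispatch.
    void Visit() override { std::cout << "startup\n"; }
  };

  int main() {
    StartupVisitor v;
    v.Run();  // prints "startup"; v.Visit() directly would not compile
  }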
 
diff --git a/src/source-position.h b/src/source-position.h
new file mode 100644
index 0000000..46ee982
--- /dev/null
+++ b/src/source-position.h
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SOURCE_POSITION_H_
+#define V8_SOURCE_POSITION_H_
+
+#include <ostream>
+
+#include "src/assembler.h"
+#include "src/flags.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// This class encapsulates encoding and decoding of source positions from
+// which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set, this object encodes the
+// identifier of the inlining and the absolute offset from the start of the
+// inlined function.
+// When the flag is not set, we simply track the absolute offset from the
+// script start.
+class SourcePosition {
+ public:
+  static SourcePosition Unknown() {
+    return SourcePosition::FromRaw(kNoPosition);
+  }
+
+  bool IsUnknown() const { return value_ == kNoPosition; }
+
+  uint32_t position() const { return PositionField::decode(value_); }
+  void set_position(uint32_t position) {
+    if (FLAG_hydrogen_track_positions) {
+      value_ = static_cast<uint32_t>(PositionField::update(value_, position));
+    } else {
+      value_ = position;
+    }
+  }
+
+  uint32_t inlining_id() const { return InliningIdField::decode(value_); }
+  void set_inlining_id(uint32_t inlining_id) {
+    if (FLAG_hydrogen_track_positions) {
+      value_ =
+          static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
+    }
+  }
+
+  uint32_t raw() const { return value_; }
+
+ private:
+  static const uint32_t kNoPosition =
+      static_cast<uint32_t>(RelocInfo::kNoPosition);
+  typedef BitField<uint32_t, 0, 9> InliningIdField;
+
+  // Offset from the start of the inlined function.
+  typedef BitField<uint32_t, 9, 23> PositionField;
+
+  friend class HPositionInfo;
+  friend class Deoptimizer;
+
+  static SourcePosition FromRaw(uint32_t raw_position) {
+    SourcePosition position;
+    position.value_ = raw_position;
+    return position;
+  }
+
+  // If FLAG_hydrogen_track_positions is set, this contains the bitfields
+  // InliningIdField and PositionField.
+  // Otherwise it contains the absolute offset from the script start.
+  uint32_t value_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
+  if (p.IsUnknown()) {
+    return os << "<?>";
+  } else if (FLAG_hydrogen_track_positions) {
+    return os << "<" << p.inlining_id() << ":" << p.position() << ">";
+  } else {
+    return os << "<0:" << p.raw() << ">";
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SOURCE_POSITION_H_
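
SourcePosition packs two fields into a single uint32_t: bits 0-8 carry the
inlining id and bits 9-31 the position. This sketch re-derives the
encode/decode arithmetic that BitField<uint32_t, shift, size> performs,
without using V8's template:

  #include <cassert>
  #include <cstdint>

  // Hand-rolled equivalents of BitField<uint32_t, 0, 9> (inlining id) and
  // BitField<uint32_t, 9, 23> (position).
  constexpr uint32_t kIdShift = 0, kIdBits = 9;
  constexpr uint32_t kPosShift = 9, kPosBits = 23;
  constexpr uint32_t kIdMask = ((1u << kIdBits) - 1) << kIdShift;
  constexpr uint32_t kPosMask = ((1u << kPosBits) - 1) << kPosShift;

  constexpr uint32_t Encode(uint32_t inlining_id, uint32_t position) {
    return ((inlining_id << kIdShift) & kIdMask) |
           ((position << kPosShift) & kPosMask);
  }
  constexpr uint32_t DecodeId(uint32_t v) { return (v & kIdMask) >> kIdShift; }
  constexpr uint32_t DecodePos(uint32_t v) {
    return (v & kPosMask) >> kPosShift;
  }

  int main() {
    uint32_t raw = Encode(/*inlining_id=*/3, /*position=*/1234);
    assert(DecodeId(raw) == 3);      // low 9 bits
    assert(DecodePos(raw) == 1234);  // next 23 bits
  }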
diff --git a/src/startup-data-util.cc b/src/startup-data-util.cc
index 4e0ad97..e20ec21 100644
--- a/src/startup-data-util.cc
+++ b/src/startup-data-util.cc
@@ -9,6 +9,7 @@
 
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
+#include "src/flags.h"
 #include "src/utils.h"
 
 
@@ -107,7 +108,9 @@
   char* natives;
   char* snapshot;
   LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
-                RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
+                RelativePath(&snapshot, directory_path,
+                             FLAG_ignition ? "snapshot_blob_ignition.bin"
+                                           : "snapshot_blob.bin"));
   free(natives);
   free(snapshot);
 #endif  // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 2801d23..43be8f1 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -527,12 +527,18 @@
   Object* name = fun->shared()->name();
   bool print_name = false;
   Isolate* isolate = fun->GetIsolate();
-  for (PrototypeIterator iter(isolate, receiver,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
-    if (iter.GetCurrent()->IsJSObject()) {
+  if (receiver->IsNull() || receiver->IsUndefined() || receiver->IsJSProxy()) {
+    print_name = true;
+  } else {
+    if (!receiver->IsJSObject()) {
+      receiver = receiver->GetRootMap(isolate)->prototype();
+    }
+
+    for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
+                                PrototypeIterator::START_AT_RECEIVER);
+         !iter.IsAtEnd(); iter.Advance()) {
       Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
-      if (key != isolate->heap()->undefined_value()) {
+      if (!key->IsUndefined()) {
         if (!name->IsString() ||
             !key->IsString() ||
             !String::cast(name)->Equals(String::cast(key))) {
@@ -542,9 +548,8 @@
           print_name = false;
         }
         name = key;
+        break;
       }
-    } else {
-      print_name = true;
     }
   }
   PrintName(name);
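
The rewritten naming loop walks the receiver's prototype chain and
reverse-looks-up the function in each object's properties, stopping at the
first hit. Its control flow reduces to roughly this shape (a std::map stands
in for JSObject property storage; purely illustrative):

  #include <map>
  #include <string>

  struct Obj {
    std::map<std::string, const void*> props;  // property name -> value
    const Obj* prototype = nullptr;
  };

  // Return the first property name bound to `fun` along the prototype chain,
  // or an empty string when no object on the chain references it.
  std::string NameOfFunction(const Obj* receiver, const void* fun) {
    for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
      for (const auto& entry : o->props)              // "slow reverse lookup"
        if (entry.second == fun) return entry.first;  // first match wins
    }
    return std::string();
  }

  int main() {
    Obj proto, receiver;
    int fn = 0;  // stands in for a function object
    proto.props["toString"] = &fn;
    receiver.prototype = &proto;
    return NameOfFunction(&receiver, &fn) == "toString" ? 0 : 1;
  }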
diff --git a/src/tracing/trace-event.h b/src/tracing/trace-event.h
index d2d423c..d17f785 100644
--- a/src/tracing/trace-event.h
+++ b/src/tracing/trace-event.h
@@ -24,8 +24,6 @@
 enum CategoryGroupEnabledFlags {
   // Category group enabled for the recording mode.
   kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
-  // Category group enabled for the monitoring mode.
-  kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
   // Category group enabled by SetEventCallbackEnabled().
   kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
   // Category group enabled to export events to ETW.
@@ -101,9 +99,7 @@
 // Get the number of times traces have been recorded. This is used to implement
 // the TRACE_EVENT_IS_NEW_TRACE facility.
 // unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
-#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED                 \
-  v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
-      ->getNumTracesRecorded
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED UNIMPLEMENTED()
 
 // Add a trace event to the platform tracing system.
 // uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 96d9495..9424497 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -100,7 +100,9 @@
 
 
 int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
-  return internal::Search<ALL_ENTRIES>(this, name, 0, out_insertion_index);
+  DCHECK(name->IsUniqueName());
+  return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
+                                       out_insertion_index);
 }
 
 
diff --git a/src/transitions.cc b/src/transitions.cc
index fc24b28..e63769e 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -159,20 +159,21 @@
 // static
 Map* TransitionArray::SearchTransition(Map* map, PropertyKind kind, Name* name,
                                        PropertyAttributes attributes) {
+  DCHECK(name->IsUniqueName());
   Object* raw_transitions = map->raw_transitions();
   if (IsSimpleTransition(raw_transitions)) {
     Map* target = GetSimpleTransition(raw_transitions);
     Name* key = GetSimpleTransitionKey(target);
-    if (!key->Equals(name)) return NULL;
+    if (key != name) return nullptr;
     PropertyDetails details = GetSimpleTargetDetails(target);
-    if (details.attributes() != attributes) return NULL;
-    if (details.kind() != kind) return NULL;
+    if (details.attributes() != attributes) return nullptr;
+    if (details.kind() != kind) return nullptr;
     return target;
   }
   if (IsFullTransitionArray(raw_transitions)) {
     TransitionArray* transitions = TransitionArray::cast(raw_transitions);
     int transition = transitions->Search(kind, name, attributes);
-    if (transition == kNotFound) return NULL;
+    if (transition == kNotFound) return nullptr;
     return transitions->GetTarget(transition);
   }
   return NULL;
@@ -195,6 +196,7 @@
 // static
 Handle<Map> TransitionArray::FindTransitionToField(Handle<Map> map,
                                                    Handle<Name> name) {
+  DCHECK(name->IsUniqueName());
   DisallowHeapAllocation no_gc;
   Map* target = SearchTransition(*map, kData, *name, NONE);
   if (target == NULL) return Handle<Map>::null();
@@ -545,9 +547,7 @@
                             PropertyAttributes attributes,
                             int* out_insertion_index) {
   int transition = SearchName(name, out_insertion_index);
-  if (transition == kNotFound) {
-    return kNotFound;
-  }
+  if (transition == kNotFound) return kNotFound;
   return SearchDetails(transition, kind, attributes, out_insertion_index);
 }
 }  // namespace internal
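
Replacing key->Equals(name) with key != name is sound only because both sides
are unique (internalized) names, which the new DCHECKs assert: interning keeps
one canonical object per distinct string, so pointer identity coincides with
content equality. A small sketch of that invariant, with std::unordered_set
standing in for the string table:

  #include <cassert>
  #include <string>
  #include <unordered_set>

  // Intern table: returns one canonical object per distinct string value.
  const std::string* Intern(const std::string& s) {
    static std::unordered_set<std::string> table;
    return &*table.insert(s).first;
  }

  int main() {
    const std::string* a = Intern("length");
    const std::string* b = Intern(std::string("len") + "gth");
    // After interning, content equality collapses to pointer equality,
    // which is what the unique-name fast path compares.
    assert(a == b);
  }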
diff --git a/src/type-cache.cc b/src/type-cache.cc
index 9ed8621..d05aaa1 100644
--- a/src/type-cache.cc
+++ b/src/type-cache.cc
@@ -5,7 +5,6 @@
 #include "src/type-cache.h"
 
 #include "src/base/lazy-instance.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/type-cache.h b/src/type-cache.h
index 1b3a260..8bd35c0 100644
--- a/src/type-cache.h
+++ b/src/type-cache.h
@@ -112,10 +112,6 @@
   Type* const kStringLengthType =
       CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
 
-  // When initializing arrays, we'll unfold the loop if the number of
-  // elements is known to be of this type.
-  Type* const kElementLoopUnrollType = CreateRange(0.0, 16.0);
-
 #define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
   Type* const k##TypeName##Array = CreateArray(k##TypeName);
   TYPED_ARRAYS(TYPED_ARRAY)
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
index 698f2a6..4519bd6 100644
--- a/src/type-feedback-vector.cc
+++ b/src/type-feedback-vector.cc
@@ -15,8 +15,13 @@
 
 
 static bool IsPropertyNameFeedback(Object* feedback) {
-  return feedback->IsString() ||
-         (feedback->IsSymbol() && !Symbol::cast(feedback)->is_private());
+  if (feedback->IsString()) return true;
+  if (!feedback->IsSymbol()) return false;
+  Symbol* symbol = Symbol::cast(feedback);
+  Heap* heap = symbol->GetHeap();
+  return symbol != heap->uninitialized_symbol() &&
+         symbol != heap->premonomorphic_symbol() &&
+         symbol != heap->megamorphic_symbol();
 }
 
 
@@ -335,6 +340,10 @@
 
 
 void FeedbackNexus::ConfigureMegamorphic() {
+  // Keyed ICs must use ConfigureMegamorphicKeyed.
+  DCHECK_NE(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector()->GetKind(slot()));
+  DCHECK_NE(FeedbackVectorSlotKind::KEYED_STORE_IC, vector()->GetKind(slot()));
+
   Isolate* isolate = GetIsolate();
   SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
               SKIP_WRITE_BARRIER);
@@ -342,6 +351,21 @@
                    SKIP_WRITE_BARRIER);
 }
 
+void KeyedLoadICNexus::ConfigureMegamorphicKeyed(IcCheckType property_type) {
+  Isolate* isolate = GetIsolate();
+  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+              SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(static_cast<int>(property_type)),
+                   SKIP_WRITE_BARRIER);
+}
+
+void KeyedStoreICNexus::ConfigureMegamorphicKeyed(IcCheckType property_type) {
+  Isolate* isolate = GetIsolate();
+  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+              SKIP_WRITE_BARRIER);
+  SetFeedbackExtra(Smi::FromInt(static_cast<int>(property_type)),
+                   SKIP_WRITE_BARRIER);
+}
 
 InlineCacheState LoadICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
@@ -819,10 +843,20 @@
   return mode;
 }
 
+IcCheckType KeyedLoadICNexus::GetKeyType() const {
+  Object* feedback = GetFeedback();
+  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(GetIsolate())) {
+    return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
+  }
+  return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
+}
 
 IcCheckType KeyedStoreICNexus::GetKeyType() const {
-  // The structure of the vector slots tells us the type.
-  return GetFeedback()->IsName() ? PROPERTY : ELEMENT;
+  Object* feedback = GetFeedback();
+  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(GetIsolate())) {
+    return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
+  }
+  return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
 }
 }  // namespace internal
 }  // namespace v8
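
ConfigureMegamorphicKeyed stores the key type by casting the IcCheckType enum
to an int and boxing it as a Smi in the extra feedback slot; GetKeyType
reverses the conversion when the primary slot holds the megamorphic sentinel.
The round-trip is just the following (the one-bit Smi tag shown is
illustrative, not V8's exact encoding):

  #include <cassert>
  #include <cstdint>

  enum class IcCheckType : int { ELEMENT = 0, PROPERTY = 1 };

  // Toy Smi: payload shifted left one bit; a clear low bit marks "small int".
  struct Smi {
    intptr_t raw;
    static Smi FromInt(int v) { return Smi{static_cast<intptr_t>(v) << 1}; }
    int value() const { return static_cast<int>(raw >> 1); }
  };

  int main() {
    // Store: enum -> int -> Smi (as in ConfigureMegamorphicKeyed).
    Smi extra = Smi::FromInt(static_cast<int>(IcCheckType::PROPERTY));
    // Load: Smi -> int -> enum (as in GetKeyType).
    IcCheckType t = static_cast<IcCheckType>(extra.value());
    assert(t == IcCheckType::PROPERTY);
    return 0;
  }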
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
index d83b77f..effbfe7 100644
--- a/src/type-feedback-vector.h
+++ b/src/type-feedback-vector.h
@@ -343,6 +343,7 @@
   FeedbackVectorSlot slot() const { return slot_; }
 
   InlineCacheState ic_state() const { return StateFromFeedback(); }
+  bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
   Map* FindFirstMap() const {
     MapHandleList maps;
     ExtractMaps(&maps);
@@ -474,6 +475,9 @@
   void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
                             CodeHandleList* handlers);
 
+  void ConfigureMegamorphicKeyed(IcCheckType property_type);
+
+  IcCheckType GetKeyType() const;
   InlineCacheState StateFromFeedback() const override;
   Name* FindFirstName() const override;
 };
@@ -530,6 +534,7 @@
   void ConfigurePolymorphic(MapHandleList* maps,
                             MapHandleList* transitioned_maps,
                             CodeHandleList* handlers);
+  void ConfigureMegamorphicKeyed(IcCheckType property_type);
 
   KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
   IcCheckType GetKeyType() const;
diff --git a/src/type-info.cc b/src/type-info.cc
index a8a406e..ad25342 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -200,7 +200,7 @@
   Handle<Object> info = GetInfo(id);
   if (!info->IsCode()) {
     // For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
-    *left_type = *right_type = *combined_type = Type::None(zone());
+    *left_type = *right_type = *combined_type = Type::None();
     return;
   }
   Handle<Code> code = Handle<Code>::cast(info);
@@ -235,7 +235,7 @@
     // operations covered by the BinaryOpIC we should always have them.
     DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
            op > BinaryOpICState::LAST_TOKEN);
-    *left = *right = *result = Type::None(zone());
+    *left = *right = *result = Type::None();
     *fixed_right_arg = Nothing<int>();
     *allocation_site = Handle<AllocationSite>::null();
     return;
@@ -261,7 +261,7 @@
 
 Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
   Handle<Object> object = GetInfo(id);
-  if (!object->IsCode()) return Type::None(zone());
+  if (!object->IsCode()) return Type::None();
   Handle<Code> code = Handle<Code>::cast(object);
   DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
   BinaryOpICState state(isolate(), code->extra_ic_state());
@@ -301,7 +301,7 @@
     KeyedLoadICNexus nexus(feedback_vector_, slot);
     CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
     *is_string = HasOnlyStringMaps(receiver_types);
-    *key_type = nexus.FindFirstName() != NULL ? PROPERTY : ELEMENT;
+    *key_type = nexus.GetKeyType();
   }
 }
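
Two things happen in the type-info.cc hunks: the keyed-load oracle now asks
the nexus for its key type directly instead of probing FindFirstName(), and
all three Type::None() call sites drop the zone() argument. The latter works
because bitset types are encoded in the pointer bits themselves (low bit set,
as in the from_bitset/is_bitset helpers deleted below), so constructing one
never allocates. A standalone model of that encoding (illustrative names):

    #include <cstdint>

    struct Type;  // opaque; only the pointer bits matter for bitset types

    // Tag bit 0 marks a "pointer" that is really a bitset constant.
    Type* FromBitset(uint32_t bits) {
      return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
    }
    bool IsBitset(Type* t) {
      return (reinterpret_cast<uintptr_t>(t) & 1u) != 0;
    }
    uint32_t AsBitset(Type* t) {
      return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t) ^ 1u);
    }
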
 
diff --git a/src/types-inl.h b/src/types-inl.h
deleted file mode 100644
index 9af4bcc..0000000
--- a/src/types-inl.h
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPES_INL_H_
-#define V8_TYPES_INL_H_
-
-#include "src/types.h"
-
-#include "src/factory.h"
-#include "src/handles-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// TypeImpl
-
-template<class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::SignedSmall() {
-  return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
-}
-
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::UnsignedSmall() {
-  return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
-}
-
-
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-template<class Config> \
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Name( \
-    Isolate* isolate, Region* region) { \
-  return Class(i::handle(isolate->heap()->name##_map()), region); \
-}
-SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
-
-template<class Config>
-TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
-  TypeImpl* t = static_cast<TypeImpl*>(object);
-  DCHECK(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsRange() ||
-         t->IsUnion() || t->IsArray() || t->IsFunction() || t->IsContext());
-  return t;
-}
-
-
-// Most precise _current_ type of a value (usually its class).
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NowOf(
-    i::Object* value, Region* region) {
-  if (value->IsSmi() ||
-      i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
-    return Of(value, region);
-  }
-  return Class(i::handle(i::HeapObject::cast(value)->map()), region);
-}
-
-
-template<class Config>
-bool TypeImpl<Config>::NowContains(i::Object* value) {
-  DisallowHeapAllocation no_allocation;
-  if (this->IsAny()) return true;
-  if (value->IsHeapObject()) {
-    i::Map* map = i::HeapObject::cast(value)->map();
-    for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
-      if (*it.Current() == map) return true;
-    }
-  }
-  return this->Contains(value);
-}
-
-
-// -----------------------------------------------------------------------------
-// ZoneTypeConfig
-
-// static
-template<class T>
-T* ZoneTypeConfig::handle(T* type) {
-  return type;
-}
-
-
-// static
-template<class T>
-T* ZoneTypeConfig::cast(Type* type) {
-  return static_cast<T*>(type);
-}
-
-
-// static
-bool ZoneTypeConfig::is_bitset(Type* type) {
-  return reinterpret_cast<uintptr_t>(type) & 1;
-}
-
-
-// static
-bool ZoneTypeConfig::is_struct(Type* type, int tag) {
-  DCHECK(tag != kRangeStructTag);
-  if (is_bitset(type)) return false;
-  int type_tag = struct_tag(as_struct(type));
-  return type_tag == tag;
-}
-
-
-// static
-bool ZoneTypeConfig::is_range(Type* type) {
-  if (is_bitset(type)) return false;
-  int type_tag = struct_tag(as_struct(type));
-  return type_tag == kRangeStructTag;
-}
-
-
-// static
-bool ZoneTypeConfig::is_class(Type* type) {
-  return false;
-}
-
-
-// static
-ZoneTypeConfig::Type::bitset ZoneTypeConfig::as_bitset(Type* type) {
-  DCHECK(is_bitset(type));
-  return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type) ^ 1u);
-}
-
-
-// static
-ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) {
-  DCHECK(!is_bitset(type));
-  return reinterpret_cast<Struct*>(type);
-}
-
-
-// static
-ZoneTypeConfig::Range* ZoneTypeConfig::as_range(Type* type) {
-  DCHECK(!is_bitset(type));
-  return reinterpret_cast<Range*>(type);
-}
-
-
-// static
-i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) {
-  UNREACHABLE();
-  return i::Handle<i::Map>();
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(Type::bitset bitset) {
-  return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset | 1u));
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(
-    Type::bitset bitset, Zone* Zone) {
-  return from_bitset(bitset);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_struct(Struct* structure) {
-  return reinterpret_cast<Type*>(structure);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_range(Range* range) {
-  return reinterpret_cast<Type*>(range);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_class(
-    i::Handle<i::Map> map, Zone* zone) {
-  return from_bitset(0);
-}
-
-
-// static
-ZoneTypeConfig::Struct* ZoneTypeConfig::struct_create(
-    int tag, int length, Zone* zone) {
-  DCHECK(tag != kRangeStructTag);
-  Struct* structure = reinterpret_cast<Struct*>(
-      zone->New(sizeof(void*) * (length + 2)));  // NOLINT
-  structure[0] = reinterpret_cast<void*>(tag);
-  structure[1] = reinterpret_cast<void*>(length);
-  return structure;
-}
-
-
-// static
-void ZoneTypeConfig::struct_shrink(Struct* structure, int length) {
-  DCHECK(0 <= length && length <= struct_length(structure));
-  structure[1] = reinterpret_cast<void*>(length);
-}
-
-
-// static
-int ZoneTypeConfig::struct_tag(Struct* structure) {
-  return static_cast<int>(reinterpret_cast<intptr_t>(structure[0]));
-}
-
-
-// static
-int ZoneTypeConfig::struct_length(Struct* structure) {
-  return static_cast<int>(reinterpret_cast<intptr_t>(structure[1]));
-}
-
-
-// static
-Type* ZoneTypeConfig::struct_get(Struct* structure, int i) {
-  DCHECK(0 <= i && i <= struct_length(structure));
-  return static_cast<Type*>(structure[2 + i]);
-}
-
-
-// static
-void ZoneTypeConfig::struct_set(Struct* structure, int i, Type* x) {
-  DCHECK(0 <= i && i <= struct_length(structure));
-  structure[2 + i] = x;
-}
-
-
-// static
-template<class V>
-i::Handle<V> ZoneTypeConfig::struct_get_value(Struct* structure, int i) {
-  DCHECK(0 <= i && i <= struct_length(structure));
-  return i::Handle<V>(static_cast<V**>(structure[2 + i]));
-}
-
-
-// static
-template<class V>
-void ZoneTypeConfig::struct_set_value(
-    Struct* structure, int i, i::Handle<V> x) {
-  DCHECK(0 <= i && i <= struct_length(structure));
-  structure[2 + i] = x.location();
-}
-
-
-// static
-ZoneTypeConfig::Range* ZoneTypeConfig::range_create(Zone* zone) {
-  Range* range = reinterpret_cast<Range*>(zone->New(sizeof(Range)));  // NOLINT
-  range->tag = reinterpret_cast<void*>(kRangeStructTag);
-  range->bitset = 0;
-  range->limits[0] = 1;
-  range->limits[1] = 0;
-  return range;
-}
-
-
-// static
-int ZoneTypeConfig::range_get_bitset(ZoneTypeConfig::Range* range) {
-  return range->bitset;
-}
-
-
-// static
-void ZoneTypeConfig::range_set_bitset(ZoneTypeConfig::Range* range, int value) {
-  range->bitset = value;
-}
-
-
-// static
-double ZoneTypeConfig::range_get_double(ZoneTypeConfig::Range* range,
-                                        int index) {
-  DCHECK(index >= 0 && index < 2);
-  return range->limits[index];
-}
-
-
-// static
-void ZoneTypeConfig::range_set_double(ZoneTypeConfig::Range* range, int index,
-                                      double value, Zone*) {
-  DCHECK(index >= 0 && index < 2);
-  range->limits[index] = value;
-}
-
-
-// -----------------------------------------------------------------------------
-// HeapTypeConfig
-
-// static
-template<class T>
-i::Handle<T> HeapTypeConfig::handle(T* type) {
-  return i::handle(type, i::HeapObject::cast(type)->GetIsolate());
-}
-
-
-// static
-template<class T>
-i::Handle<T> HeapTypeConfig::cast(i::Handle<Type> type) {
-  return i::Handle<T>::cast(type);
-}
-
-
-// static
-bool HeapTypeConfig::is_bitset(Type* type) {
-  return type->IsSmi();
-}
-
-
-// static
-bool HeapTypeConfig::is_class(Type* type) {
-  return type->IsMap();
-}
-
-
-// static
-bool HeapTypeConfig::is_struct(Type* type, int tag) {
-  DCHECK(tag != kRangeStructTag);
-  return type->IsFixedArray() && struct_tag(as_struct(type)) == tag;
-}
-
-
-// static
-bool HeapTypeConfig::is_range(Type* type) {
-  return type->IsFixedArray() && struct_tag(as_struct(type)) == kRangeStructTag;
-}
-
-
-// static
-HeapTypeConfig::Type::bitset HeapTypeConfig::as_bitset(Type* type) {
-  // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
-  return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type));
-}
-
-
-// static
-i::Handle<i::Map> HeapTypeConfig::as_class(Type* type) {
-  return i::handle(i::Map::cast(type));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) {
-  return i::handle(Struct::cast(type));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Range> HeapTypeConfig::as_range(Type* type) {
-  return i::handle(Range::cast(type));
-}
-
-
-// static
-HeapTypeConfig::Type* HeapTypeConfig::from_bitset(Type::bitset bitset) {
-  // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
-  return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset(
-    Type::bitset bitset, Isolate* isolate) {
-  return i::handle(from_bitset(bitset), isolate);
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_class(
-    i::Handle<i::Map> map, Isolate* isolate) {
-  return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_struct(
-    i::Handle<Struct> structure) {
-  return i::Handle<Type>::cast(i::Handle<Object>::cast(structure));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_range(
-    i::Handle<Range> range) {
-  return i::Handle<Type>::cast(i::Handle<Object>::cast(range));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::struct_create(
-    int tag, int length, Isolate* isolate) {
-  i::Handle<Struct> structure = isolate->factory()->NewFixedArray(length + 1);
-  structure->set(0, i::Smi::FromInt(tag));
-  return structure;
-}
-
-
-// static
-void HeapTypeConfig::struct_shrink(i::Handle<Struct> structure, int length) {
-  structure->Shrink(length + 1);
-}
-
-
-// static
-int HeapTypeConfig::struct_tag(i::Handle<Struct> structure) {
-  return static_cast<i::Smi*>(structure->get(0))->value();
-}
-
-
-// static
-int HeapTypeConfig::struct_length(i::Handle<Struct> structure) {
-  return structure->length() - 1;
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::struct_get(
-    i::Handle<Struct> structure, int i) {
-  Type* type = static_cast<Type*>(structure->get(i + 1));
-  return i::handle(type, structure->GetIsolate());
-}
-
-
-// static
-void HeapTypeConfig::struct_set(
-    i::Handle<Struct> structure, int i, i::Handle<Type> type) {
-  structure->set(i + 1, *type);
-}
-
-
-// static
-template<class V>
-i::Handle<V> HeapTypeConfig::struct_get_value(
-    i::Handle<Struct> structure, int i) {
-  V* x = static_cast<V*>(structure->get(i + 1));
-  return i::handle(x, structure->GetIsolate());
-}
-
-
-// static
-template<class V>
-void HeapTypeConfig::struct_set_value(
-    i::Handle<Struct> structure, int i, i::Handle<V> x) {
-  structure->set(i + 1, *x);
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Range> HeapTypeConfig::range_create(
-    Isolate* isolate) {
-  i::Handle<Range> range = isolate->factory()->NewFixedArray(4);
-  range->set(0, i::Smi::FromInt(kRangeStructTag));
-  return range;
-}
-
-
-// static
-int HeapTypeConfig::range_get_bitset(i::Handle<HeapTypeConfig::Range> range) {
-  Type* v = static_cast<Type*>(range->get(1));
-  return as_bitset(v);
-}
-
-
-// static
-void HeapTypeConfig::range_set_bitset(i::Handle<HeapTypeConfig::Range> range,
-                                      int value) {
-  range->set(1, from_bitset(value));
-}
-
-
-// static
-double HeapTypeConfig::range_get_double(i::Handle<HeapTypeConfig::Range> range,
-                                        int index) {
-  DCHECK(index >= 0 && index < 2);
-  return range->get(index + 2)->Number();
-}
-
-
-// static
-void HeapTypeConfig::range_set_double(i::Handle<HeapTypeConfig::Range> range,
-                                      int index, double value,
-                                      Isolate* isolate) {
-  DCHECK(index >= 0 && index < 2);
-  i::Handle<Object> number = isolate->factory()->NewNumber(value);
-  range->set(index + 2, *number);
-}
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPES_INL_H_
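
The deleted header was the glue between the templated TypeImpl<Config> and its
two configurations; with the heap-allocated representation gone, only the zone
flavour survives and is folded into types.h/types.cc below. Its Struct layout
is worth recording: slot 0 holds the tag, slot 1 the length, and the elements
start at slot 2. A standalone model of that layout (malloc stands in for
zone->New; names are illustrative):

    #include <cstdint>
    #include <cstdlib>

    typedef void* Slot;

    // slot 0 = tag, slot 1 = length, slots 2.. = elements.
    Slot* StructCreate(int tag, int length) {
      Slot* s = static_cast<Slot*>(std::malloc(sizeof(Slot) * (length + 2)));
      s[0] = reinterpret_cast<void*>(static_cast<intptr_t>(tag));
      s[1] = reinterpret_cast<void*>(static_cast<intptr_t>(length));
      return s;
    }
    int StructTag(Slot* s) {
      return static_cast<int>(reinterpret_cast<intptr_t>(s[0]));
    }
    int StructLength(Slot* s) {
      return static_cast<int>(reinterpret_cast<intptr_t>(s[1]));
    }
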
diff --git a/src/types.cc b/src/types.cc
index 9261060..d54826e 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -6,8 +6,8 @@
 
 #include "src/types.h"
 
+#include "src/handles-inl.h"
 #include "src/ostreams.h"
-#include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,19 +16,17 @@
 // NOTE: If code is marked as being a "shortcut", this means that removing
 // the code won't affect the semantics of the surrounding function definition.
 
+// static
+bool Type::IsInteger(i::Object* x) {
+  return x->IsNumber() && Type::IsInteger(x->Number());
+}
 
 // -----------------------------------------------------------------------------
 // Range-related helper functions.
 
-template <class Config>
-bool TypeImpl<Config>::Limits::IsEmpty() {
-  return this->min > this->max;
-}
+bool RangeType::Limits::IsEmpty() { return this->min > this->max; }
 
-
-template<class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Intersect(
-    Limits lhs, Limits rhs) {
+RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
   DisallowHeapAllocation no_allocation;
   Limits result(lhs);
   if (lhs.min < rhs.min) result.min = rhs.min;
@@ -36,10 +34,7 @@
   return result;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Union(
-    Limits lhs, Limits rhs) {
+RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
   DisallowHeapAllocation no_allocation;
   if (lhs.IsEmpty()) return rhs;
   if (rhs.IsEmpty()) return lhs;
@@ -49,38 +44,26 @@
   return result;
 }
 
-
-template<class Config>
-bool TypeImpl<Config>::Overlap(
-    typename TypeImpl<Config>::RangeType* lhs,
-    typename TypeImpl<Config>::RangeType* rhs) {
+bool Type::Overlap(RangeType* lhs, RangeType* rhs) {
   DisallowHeapAllocation no_allocation;
-  return !Limits::Intersect(Limits(lhs), Limits(rhs)).IsEmpty();
+  return !RangeType::Limits::Intersect(RangeType::Limits(lhs),
+                                       RangeType::Limits(rhs))
+              .IsEmpty();
 }
 
-
-template<class Config>
-bool TypeImpl<Config>::Contains(
-    typename TypeImpl<Config>::RangeType* lhs,
-    typename TypeImpl<Config>::RangeType* rhs) {
+bool Type::Contains(RangeType* lhs, RangeType* rhs) {
   DisallowHeapAllocation no_allocation;
   return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
 }
 
-
-template <class Config>
-bool TypeImpl<Config>::Contains(typename TypeImpl<Config>::RangeType* lhs,
-                                typename TypeImpl<Config>::ConstantType* rhs) {
+bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
   DisallowHeapAllocation no_allocation;
   return IsInteger(*rhs->Value()) &&
          lhs->Min() <= rhs->Value()->Number() &&
          rhs->Value()->Number() <= lhs->Max();
 }
 
-
-template<class Config>
-bool TypeImpl<Config>::Contains(
-    typename TypeImpl<Config>::RangeType* range, i::Object* val) {
+bool Type::Contains(RangeType* range, i::Object* val) {
   DisallowHeapAllocation no_allocation;
   return IsInteger(val) &&
          range->Min() <= val->Number() && val->Number() <= range->Max();
@@ -90,8 +73,7 @@
 // -----------------------------------------------------------------------------
 // Min and Max computation.
 
-template<class Config>
-double TypeImpl<Config>::Min() {
+double Type::Min() {
   DCHECK(this->SemanticIs(Number()));
   if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
   if (this->IsUnion()) {
@@ -107,9 +89,7 @@
   return 0;
 }
 
-
-template<class Config>
-double TypeImpl<Config>::Max() {
+double Type::Max() {
   DCHECK(this->SemanticIs(Number()));
   if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
   if (this->IsUnion()) {
@@ -131,12 +111,10 @@
 
 
 // The largest bitset subsumed by this type.
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+Type::bitset BitsetType::Glb(Type* type) {
   DisallowHeapAllocation no_allocation;
   // Fast case.
-  if (type->IsBitset()) {
+  if (IsBitset(type)) {
     return type->AsBitset();
   } else if (type->IsUnion()) {
     SLOW_DCHECK(type->AsUnion()->Wellformed());
@@ -153,11 +131,9 @@
 
 
 // The smallest bitset subsuming this type, possibly not a proper one.
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
+Type::bitset BitsetType::Lub(Type* type) {
   DisallowHeapAllocation no_allocation;
-  if (type->IsBitset()) return type->AsBitset();
+  if (IsBitset(type)) return type->AsBitset();
   if (type->IsUnion()) {
     // Take the representation from the first element, which is always
     // a bitset.
@@ -174,14 +150,12 @@
   if (type->IsContext()) return kInternal & kTaggedPointer;
   if (type->IsArray()) return kOtherObject;
   if (type->IsFunction()) return kFunction;
+  if (type->IsTuple()) return kInternal;
   UNREACHABLE();
   return kNone;
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
+Type::bitset BitsetType::Lub(i::Map* map) {
   DisallowHeapAllocation no_allocation;
   switch (map->instance_type()) {
     case STRING_TYPE:
@@ -241,7 +215,6 @@
     case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
-    case JS_ITERATOR_RESULT_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
     case JS_PROMISE_TYPE:
@@ -267,8 +240,7 @@
       // over type or class variables, esp ones with bounds...
       return kDetectable & kTaggedPointer;
     case ALLOCATION_SITE_TYPE:
-    case DECLARED_ACCESSOR_INFO_TYPE:
-    case EXECUTABLE_ACCESSOR_INFO_TYPE:
+    case ACCESSOR_INFO_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
     case ACCESSOR_PAIR_TYPE:
     case FIXED_ARRAY_TYPE:
@@ -292,7 +264,6 @@
       TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
 #undef FIXED_TYPED_ARRAY_CASE
     case FILLER_TYPE:
-    case DECLARED_ACCESSOR_DESCRIPTOR_TYPE:
     case ACCESS_CHECK_INFO_TYPE:
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
@@ -319,10 +290,7 @@
   return kNone;
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+Type::bitset BitsetType::Lub(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   if (value->IsNumber()) {
     return Lub(value->Number()) &
@@ -331,10 +299,7 @@
   return Lub(i::HeapObject::cast(value)->map());
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(double value) {
+Type::bitset BitsetType::Lub(double value) {
   DisallowHeapAllocation no_allocation;
   if (i::IsMinusZero(value)) return kMinusZero;
   if (std::isnan(value)) return kNaN;
@@ -344,36 +309,24 @@
 
 
 // Minimum values of plain numeric bitsets.
-template <class Config>
-const typename TypeImpl<Config>::BitsetType::Boundary
-TypeImpl<Config>::BitsetType::BoundariesArray[] = {
-        {kOtherNumber, kPlainNumber, -V8_INFINITY},
-        {kOtherSigned32, kNegative32, kMinInt},
-        {kNegative31, kNegative31, -0x40000000},
-        {kUnsigned30, kUnsigned30, 0},
-        {kOtherUnsigned31, kUnsigned31, 0x40000000},
-        {kOtherUnsigned32, kUnsigned32, 0x80000000},
-        {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
+const BitsetType::Boundary BitsetType::BoundariesArray[] = {
+    {kOtherNumber, kPlainNumber, -V8_INFINITY},
+    {kOtherSigned32, kNegative32, kMinInt},
+    {kNegative31, kNegative31, -0x40000000},
+    {kUnsigned30, kUnsigned30, 0},
+    {kOtherUnsigned31, kUnsigned31, 0x40000000},
+    {kOtherUnsigned32, kUnsigned32, 0x80000000},
+    {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
 
+const BitsetType::Boundary* BitsetType::Boundaries() { return BoundariesArray; }
 
-template <class Config>
-const typename TypeImpl<Config>::BitsetType::Boundary*
-TypeImpl<Config>::BitsetType::Boundaries() {
-  return BoundariesArray;
-}
-
-
-template <class Config>
-size_t TypeImpl<Config>::BitsetType::BoundariesSize() {
+size_t BitsetType::BoundariesSize() {
   // Windows doesn't like arraysize here.
   // return arraysize(BoundariesArray);
   return 7;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::ExpandInternals(
-    typename TypeImpl<Config>::bitset bits) {
+Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
   DisallowHeapAllocation no_allocation;
   if (!(bits & SEMANTIC(kPlainNumber))) return bits;  // Shortcut.
   const Boundary* boundaries = Boundaries();
@@ -385,10 +338,7 @@
   return bits;
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(double min, double max) {
+Type::bitset BitsetType::Lub(double min, double max) {
   DisallowHeapAllocation no_allocation;
   int lub = kNone;
   const Boundary* mins = Boundaries();
@@ -402,17 +352,11 @@
   return lub | mins[BoundariesSize() - 1].internal;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::NumberBits(
-    bitset bits) {
+Type::bitset BitsetType::NumberBits(bitset bits) {
   return SEMANTIC(bits & kPlainNumber);
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
-    double min, double max) {
+Type::bitset BitsetType::Glb(double min, double max) {
   DisallowHeapAllocation no_allocation;
   int glb = kNone;
   const Boundary* mins = Boundaries();
@@ -431,9 +375,7 @@
   return glb & ~(SEMANTIC(kOtherNumber));
 }
 
-
-template <class Config>
-double TypeImpl<Config>::BitsetType::Min(bitset bits) {
+double BitsetType::Min(bitset bits) {
   DisallowHeapAllocation no_allocation;
   DCHECK(Is(SEMANTIC(bits), kNumber));
   const Boundary* mins = Boundaries();
@@ -447,9 +389,7 @@
   return std::numeric_limits<double>::quiet_NaN();
 }
 
-
-template<class Config>
-double TypeImpl<Config>::BitsetType::Max(bitset bits) {
+double BitsetType::Max(bitset bits) {
   DisallowHeapAllocation no_allocation;
   DCHECK(Is(SEMANTIC(bits), kNumber));
   const Boundary* mins = Boundaries();
@@ -471,9 +411,7 @@
 // -----------------------------------------------------------------------------
 // Predicates.
 
-
-template<class Config>
-bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
+bool Type::SimplyEquals(Type* that) {
   DisallowHeapAllocation no_allocation;
   if (this->IsClass()) {
     return that->IsClass()
@@ -505,20 +443,29 @@
     }
     return true;
   }
+  if (this->IsTuple()) {
+    if (!that->IsTuple()) return false;
+    TupleType* this_tuple = this->AsTuple();
+    TupleType* that_tuple = that->AsTuple();
+    if (this_tuple->Arity() != that_tuple->Arity()) {
+      return false;
+    }
+    for (int i = 0, n = this_tuple->Arity(); i < n; ++i) {
+      if (!this_tuple->Element(i)->Equals(that_tuple->Element(i))) return false;
+    }
+    return true;
+  }
   UNREACHABLE();
   return false;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::Representation() {
+Type::bitset Type::Representation() {
   return REPRESENTATION(this->BitsetLub());
 }
 
 
 // Check if [this] <= [that].
-template<class Config>
-bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+bool Type::SlowIs(Type* that) {
   DisallowHeapAllocation no_allocation;
 
   // Fast bitset cases
@@ -542,8 +489,7 @@
 
 // Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
 // should be independent of the representation axis of the types.
-template <class Config>
-bool TypeImpl<Config>::SemanticIs(TypeImpl* that) {
+bool Type::SemanticIs(Type* that) {
   DisallowHeapAllocation no_allocation;
 
   if (this == that) return true;
@@ -566,7 +512,7 @@
   // T <= (T1 \/ ... \/ Tn)  if  (T <= T1) \/ ... \/ (T <= Tn)
   if (that->IsUnion()) {
     for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
-      if (this->SemanticIs(that->AsUnion()->Get(i)->unhandle())) return true;
+      if (this->SemanticIs(that->AsUnion()->Get(i))) return true;
       if (i > 1 && this->IsRange()) return false;  // Shortcut.
     }
     return false;
@@ -582,9 +528,28 @@
   return this->SimplyEquals(that);
 }
 
+// Most precise _current_ type of a value (usually its class).
+Type* Type::NowOf(i::Object* value, Zone* zone) {
+  if (value->IsSmi() ||
+      i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
+    return Of(value, zone);
+  }
+  return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
+}
 
-template<class Config>
-bool TypeImpl<Config>::NowIs(TypeImpl* that) {
+bool Type::NowContains(i::Object* value) {
+  DisallowHeapAllocation no_allocation;
+  if (this->IsAny()) return true;
+  if (value->IsHeapObject()) {
+    i::Map* map = i::HeapObject::cast(value)->map();
+    for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
+      if (*it.Current() == map) return true;
+    }
+  }
+  return this->Contains(value);
+}
+
+bool Type::NowIs(Type* that) {
   DisallowHeapAllocation no_allocation;
 
   // TODO(rossberg): this is incorrect for
@@ -604,16 +569,14 @@
 
 
 // Check if [this] contains only (currently) stable classes.
-template<class Config>
-bool TypeImpl<Config>::NowStable() {
+bool Type::NowStable() {
   DisallowHeapAllocation no_allocation;
   return !this->IsClass() || this->AsClass()->Map()->is_stable();
 }
 
 
 // Check if [this] and [that] overlap.
-template<class Config>
-bool TypeImpl<Config>::Maybe(TypeImpl* that) {
+bool Type::Maybe(Type* that) {
   DisallowHeapAllocation no_allocation;
 
   // Take care of the representation part (and also approximate
@@ -624,8 +587,7 @@
   return SemanticMaybe(that);
 }
 
-template <class Config>
-bool TypeImpl<Config>::SemanticMaybe(TypeImpl* that) {
+bool Type::SemanticMaybe(Type* that) {
   DisallowHeapAllocation no_allocation;
 
   // (T1 \/ ... \/ Tn) overlaps T  if  (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -639,7 +601,7 @@
   // T overlaps (T1 \/ ... \/ Tn)  if  (T overlaps T1) \/ ... \/ (T overlaps Tn)
   if (that->IsUnion()) {
     for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
-      if (this->SemanticMaybe(that->AsUnion()->Get(i)->unhandle())) return true;
+      if (this->SemanticMaybe(that->AsUnion()->Get(i))) return true;
     }
     return false;
   }
@@ -679,33 +641,28 @@
 
 
 // Return the range in [this], or [NULL].
-template<class Config>
-typename TypeImpl<Config>::RangeType* TypeImpl<Config>::GetRange() {
+Type* Type::GetRange() {
   DisallowHeapAllocation no_allocation;
-  if (this->IsRange()) return this->AsRange();
+  if (this->IsRange()) return this;
   if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
-    return this->AsUnion()->Get(1)->AsRange();
+    return this->AsUnion()->Get(1);
   }
   return NULL;
 }
 
-
-template<class Config>
-bool TypeImpl<Config>::Contains(i::Object* value) {
+bool Type::Contains(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
     if (*it.Current() == value) return true;
   }
   if (IsInteger(value)) {
-    RangeType* range = this->GetRange();
-    if (range != NULL && Contains(range, value)) return true;
+    Type* range = this->GetRange();
+    if (range != NULL && Contains(range->AsRange(), value)) return true;
   }
   return BitsetType::New(BitsetType::Lub(value))->Is(this);
 }
 
-
-template<class Config>
-bool TypeImpl<Config>::UnionType::Wellformed() {
+bool UnionType::Wellformed() {
   DisallowHeapAllocation no_allocation;
   // This checks the invariants of the union representation:
   // 1. There are at least two elements.
@@ -724,7 +681,7 @@
     DCHECK(!this->Get(i)->IsUnion());               // (4)
     for (int j = 0; j < this->Length(); ++j) {
       if (i != j && i != 0)
-        DCHECK(!this->Get(i)->SemanticIs(this->Get(j)->unhandle()));  // (5)
+        DCHECK(!this->Get(i)->SemanticIs(this->Get(j)));  // (5)
     }
   }
   DCHECK(!this->Get(1)->IsRange() ||
@@ -744,14 +701,10 @@
       y >= std::numeric_limits<int>::min() - x;
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
-    TypeHandle type1, TypeHandle type2, Region* region) {
-
+Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
-    return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
+    return BitsetType::New(type1->AsBitset() & type2->AsBitset());
   }
 
   // Fast case: top or bottom types.
@@ -775,47 +728,45 @@
   // semi-fast case above - we should behave the same way regardless of
   // representations. Intersection with a universal bitset should only update
   // the representations.
-  if (type1->SemanticIs(type2->unhandle())) {
-    type2 = Any(region);
-  } else if (type2->SemanticIs(type1->unhandle())) {
-    type1 = Any(region);
+  if (type1->SemanticIs(type2)) {
+    type2 = Any();
+  } else if (type2->SemanticIs(type1)) {
+    type1 = Any();
   }
 
   bitset bits =
       SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
   int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
   int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
-  if (!AddIsSafe(size1, size2)) return Any(region);
+  if (!AddIsSafe(size1, size2)) return Any();
   int size = size1 + size2;
-  if (!AddIsSafe(size, 2)) return Any(region);
+  if (!AddIsSafe(size, 2)) return Any();
   size += 2;
-  UnionHandle result = UnionType::New(size, region);
+  Type* result_type = UnionType::New(size, zone);
+  UnionType* result = result_type->AsUnion();
   size = 0;
 
   // Deal with bitsets.
-  result->Set(size++, BitsetType::New(bits, region));
+  result->Set(size++, BitsetType::New(bits));
 
-  Limits lims = Limits::Empty();
-  size = IntersectAux(type1, type2, result, size, &lims, region);
+  RangeType::Limits lims = RangeType::Limits::Empty();
+  size = IntersectAux(type1, type2, result, size, &lims, zone);
 
   // If the range is not empty, then insert it into the union and
   // remove the number bits from the bitset.
   if (!lims.IsEmpty()) {
-    size = UpdateRange(RangeType::New(lims, representation, region), result,
-                       size, region);
+    size = UpdateRange(RangeType::New(lims, representation, zone), result, size,
+                       zone);
 
     // Remove the number bits.
     bitset number_bits = BitsetType::NumberBits(bits);
     bits &= ~number_bits;
-    result->Set(0, BitsetType::New(bits, region));
+    result->Set(0, BitsetType::New(bits));
   }
-  return NormalizeUnion(result, size, region);
+  return NormalizeUnion(result_type, size, zone);
 }
 
-
-template<class Config>
-int TypeImpl<Config>::UpdateRange(
-    RangeHandle range, UnionHandle result, int size, Region* region) {
+int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
   if (size == 1) {
     result->Set(size++, range);
   } else {
@@ -826,7 +777,7 @@
 
   // Remove any components that just got subsumed.
   for (int i = 2; i < size; ) {
-    if (result->Get(i)->SemanticIs(range->unhandle())) {
+    if (result->Get(i)->SemanticIs(range)) {
       result->Set(i, result->Get(--size));
     } else {
       ++i;
@@ -835,44 +786,37 @@
   return size;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::ToLimits(bitset bits,
-                                                             Region* region) {
+RangeType::Limits Type::ToLimits(bitset bits, Zone* zone) {
   bitset number_bits = BitsetType::NumberBits(bits);
 
   if (number_bits == BitsetType::kNone) {
-    return Limits::Empty();
+    return RangeType::Limits::Empty();
   }
 
-  return Limits(BitsetType::Min(number_bits), BitsetType::Max(number_bits));
+  return RangeType::Limits(BitsetType::Min(number_bits),
+                           BitsetType::Max(number_bits));
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::IntersectRangeAndBitset(
-    TypeHandle range, TypeHandle bitset, Region* region) {
-  Limits range_lims(range->AsRange());
-  Limits bitset_lims = ToLimits(bitset->AsBitset(), region);
-  return Limits::Intersect(range_lims, bitset_lims);
+RangeType::Limits Type::IntersectRangeAndBitset(Type* range, Type* bitset,
+                                                Zone* zone) {
+  RangeType::Limits range_lims(range->AsRange());
+  RangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
+  return RangeType::Limits::Intersect(range_lims, bitset_lims);
 }
 
-
-template <class Config>
-int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
-                                   UnionHandle result, int size, Limits* lims,
-                                   Region* region) {
+int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
+                       RangeType::Limits* lims, Zone* zone) {
   if (lhs->IsUnion()) {
     for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
       size =
-          IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, region);
+          IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, zone);
     }
     return size;
   }
   if (rhs->IsUnion()) {
     for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
       size =
-          IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, region);
+          IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, zone);
     }
     return size;
   }
@@ -883,40 +827,41 @@
 
   if (lhs->IsRange()) {
     if (rhs->IsBitset()) {
-      Limits lim = IntersectRangeAndBitset(lhs, rhs, region);
+      RangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
 
       if (!lim.IsEmpty()) {
-        *lims = Limits::Union(lim, *lims);
+        *lims = RangeType::Limits::Union(lim, *lims);
       }
       return size;
     }
     if (rhs->IsClass()) {
-      *lims = Limits::Union(Limits(lhs->AsRange()), *lims);
+      *lims =
+          RangeType::Limits::Union(RangeType::Limits(lhs->AsRange()), *lims);
     }
     if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
-      return AddToUnion(rhs, result, size, region);
+      return AddToUnion(rhs, result, size, zone);
     }
     if (rhs->IsRange()) {
-      Limits lim = Limits::Intersect(
-          Limits(lhs->AsRange()), Limits(rhs->AsRange()));
+      RangeType::Limits lim = RangeType::Limits::Intersect(
+          RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
       if (!lim.IsEmpty()) {
-        *lims = Limits::Union(lim, *lims);
+        *lims = RangeType::Limits::Union(lim, *lims);
       }
     }
     return size;
   }
   if (rhs->IsRange()) {
     // This case is handled symmetrically above.
-    return IntersectAux(rhs, lhs, result, size, lims, region);
+    return IntersectAux(rhs, lhs, result, size, lims, zone);
   }
   if (lhs->IsBitset() || rhs->IsBitset()) {
-    return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, region);
+    return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, zone);
   }
   if (lhs->IsClass() != rhs->IsClass()) {
-    return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, region);
+    return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, zone);
   }
-  if (lhs->SimplyEquals(rhs->unhandle())) {
-    return AddToUnion(lhs, result, size, region);
+  if (lhs->SimplyEquals(rhs)) {
+    return AddToUnion(lhs, result, size, zone);
   }
   return size;
 }
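
IntersectAux above reduces every mixed case to interval arithmetic on
RangeType::Limits: intersection raises the lower bound and lowers the upper
bound, union does the opposite, and emptiness is min > max (see
Limits::IsEmpty near the top of this file; the tail of Limits::Union is
elided by the hunk context). A self-contained restatement of those helpers:

    struct Limits {
      double min, max;
      bool IsEmpty() const { return min > max; }
    };

    Limits Intersect(Limits lhs, Limits rhs) {
      Limits result = lhs;
      if (lhs.min < rhs.min) result.min = rhs.min;  // raise the lower bound
      if (lhs.max > rhs.max) result.max = rhs.max;  // lower the upper bound
      return result;
    }

    Limits Union(Limits lhs, Limits rhs) {
      if (lhs.IsEmpty()) return rhs;
      if (rhs.IsEmpty()) return lhs;
      Limits result = lhs;
      if (lhs.min > rhs.min) result.min = rhs.min;  // widen downward
      if (lhs.max < rhs.max) result.max = rhs.max;  // widen upward
      return result;
    }
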
@@ -926,9 +871,7 @@
 // If the range is non-empty, the number bits in the bitset should be
 // clear. Moreover, if we have a canonical range (such as Signed32),
 // we want to produce a bitset rather than a range.
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
-    RangeHandle range, bitset* bits, Region* region) {
+Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
   // Fast path: If the bitset does not mention numbers, we can just keep the
   // range.
   bitset number_bits = BitsetType::NumberBits(*bits);
@@ -940,7 +883,7 @@
   // leave the bitset untouched.
   bitset range_lub = SEMANTIC(range->BitsetLub());
   if (BitsetType::Is(range_lub, *bits)) {
-    return None(region);
+    return None();
   }
 
   // Slow path: reconcile the bitset range and the range.
@@ -966,17 +909,13 @@
   if (bitset_max > range_max) {
     range_max = bitset_max;
   }
-  return RangeType::New(range_min, range_max,
-                        BitsetType::New(BitsetType::kNone, region), region);
+  return RangeType::New(range_min, range_max, BitsetType::kNone, zone);
 }
 
-
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
-    TypeHandle type1, TypeHandle type2, Region* region) {
+Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
-    return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
+    return BitsetType::New(type1->AsBitset() | type2->AsBitset());
   }
 
   // Fast case: top or bottom types.
@@ -997,63 +936,62 @@
   // Slow case: create union.
   int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
   int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
-  if (!AddIsSafe(size1, size2)) return Any(region);
+  if (!AddIsSafe(size1, size2)) return Any();
   int size = size1 + size2;
-  if (!AddIsSafe(size, 2)) return Any(region);
+  if (!AddIsSafe(size, 2)) return Any();
   size += 2;
-  UnionHandle result = UnionType::New(size, region);
+  Type* result_type = UnionType::New(size, zone);
+  UnionType* result = result_type->AsUnion();
   size = 0;
 
   // Compute the new bitset.
   bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
 
   // Deal with ranges.
-  TypeHandle range = None(region);
-  RangeType* range1 = type1->GetRange();
-  RangeType* range2 = type2->GetRange();
+  Type* range = None();
+  Type* range1 = type1->GetRange();
+  Type* range2 = type2->GetRange();
   if (range1 != NULL && range2 != NULL) {
-    Limits lims = Limits::Union(Limits(range1), Limits(range2));
-    RangeHandle union_range = RangeType::New(lims, representation, region);
-    range = NormalizeRangeAndBitset(union_range, &new_bitset, region);
+    RangeType::Limits lims =
+        RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
+                                 RangeType::Limits(range2->AsRange()));
+    Type* union_range = RangeType::New(lims, representation, zone);
+    range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
   } else if (range1 != NULL) {
-    range = NormalizeRangeAndBitset(handle(range1), &new_bitset, region);
+    range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
   } else if (range2 != NULL) {
-    range = NormalizeRangeAndBitset(handle(range2), &new_bitset, region);
+    range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
   }
   new_bitset = SEMANTIC(new_bitset) | representation;
-  TypeHandle bits = BitsetType::New(new_bitset, region);
+  Type* bits = BitsetType::New(new_bitset);
   result->Set(size++, bits);
   if (!range->IsNone()) result->Set(size++, range);
 
-  size = AddToUnion(type1, result, size, region);
-  size = AddToUnion(type2, result, size, region);
-  return NormalizeUnion(result, size, region);
+  size = AddToUnion(type1, result, size, zone);
+  size = AddToUnion(type2, result, size, zone);
+  return NormalizeUnion(result_type, size, zone);
 }
 
 
 // Add [type] to [result] unless [type] is bitset, range, or already subsumed.
 // Return new size of [result].
-template<class Config>
-int TypeImpl<Config>::AddToUnion(
-    TypeHandle type, UnionHandle result, int size, Region* region) {
+int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
   if (type->IsBitset() || type->IsRange()) return size;
   if (type->IsUnion()) {
     for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
-      size = AddToUnion(type->AsUnion()->Get(i), result, size, region);
+      size = AddToUnion(type->AsUnion()->Get(i), result, size, zone);
     }
     return size;
   }
   for (int i = 0; i < size; ++i) {
-    if (type->SemanticIs(result->Get(i)->unhandle())) return size;
+    if (type->SemanticIs(result->Get(i))) return size;
   }
   result->Set(size++, type);
   return size;
 }
 
-
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
-    UnionHandle unioned, int size, Region* region) {
+Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
+  UnionType* unioned = union_type->AsUnion();
   DCHECK(size >= 1);
   DCHECK(unioned->Get(0)->IsBitset());
   // If the union has just one element, return it.
@@ -1069,13 +1007,13 @@
     }
     if (unioned->Get(1)->IsRange()) {
       return RangeType::New(unioned->Get(1)->AsRange()->Min(),
-                            unioned->Get(1)->AsRange()->Max(), unioned->Get(0),
-                            region);
+                            unioned->Get(1)->AsRange()->Max(),
+                            unioned->Get(0)->AsBitset(), zone);
     }
   }
   unioned->Shrink(size);
   SLOW_DCHECK(unioned->Wellformed());
-  return unioned;
+  return union_type;
 }
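
NormalizeUnion canonicalizes what Intersect/Union just built: element 0 is
always the bitset, a union with a single survivor returns that element, and a
surviving {bitset, range} pair collapses back into one RangeType that absorbs
the bitset. The guard for the range fold is elided by the hunk context;
assuming the size-2 case shown, the decision reduces to the sketch below (the
elided guard in the source imposes extra conditions on the leading bitset):

    enum Outcome { kReturnBitset, kReturnRange, kKeepUnion };

    // Sketch of the fold decision only, under the assumptions stated above.
    Outcome Fold(int size, bool second_is_range) {
      if (size == 1) return kReturnBitset;
      if (size == 2 && second_is_range) return kReturnRange;
      return kKeepUnion;
    }
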
 
 
@@ -1083,26 +1021,21 @@
 // Component extraction
 
 // static
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Representation(
-    TypeHandle t, Region* region) {
-  return BitsetType::New(t->Representation(), region);
+Type* Type::Representation(Type* t, Zone* zone) {
+  return BitsetType::New(t->Representation());
 }
 
 
 // static
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Semantic(
-    TypeHandle t, Region* region) {
-  return Intersect(t, BitsetType::New(BitsetType::kSemantic, region), region);
+Type* Type::Semantic(Type* t, Zone* zone) {
+  return Intersect(t, BitsetType::New(BitsetType::kSemantic), zone);
 }
 
 
 // -----------------------------------------------------------------------------
 // Iteration.
 
-template<class Config>
-int TypeImpl<Config>::NumClasses() {
+int Type::NumClasses() {
   DisallowHeapAllocation no_allocation;
   if (this->IsClass()) {
     return 1;
@@ -1117,9 +1050,7 @@
   }
 }
 
-
-template<class Config>
-int TypeImpl<Config>::NumConstants() {
+int Type::NumConstants() {
   DisallowHeapAllocation no_allocation;
   if (this->IsConstant()) {
     return 1;
@@ -1134,10 +1065,8 @@
   }
 }
 
-
-template<class Config> template<class T>
-typename TypeImpl<Config>::TypeHandle
-TypeImpl<Config>::Iterator<T>::get_type() {
+template <class T>
+Type* Type::Iterator<T>::get_type() {
   DCHECK(!Done());
   return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
 }
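
The iterator hunks that follow keep the auxiliary-struct dispatch even though
Type is no longer templated: a member template cannot be explicitly
specialised without also specialising its enclosing class template, so the
per-T behaviour lives in a free helper that can be. A minimal standalone
model of the trick:

    // Dispatch on T goes through a helper that *can* be specialised.
    template <class T> struct Aux;  // primary intentionally undefined

    template <> struct Aux<int> {
      static const char* name() { return "int"; }
    };
    template <> struct Aux<char> {
      static const char* name() { return "char"; }
    };

    struct Container {
      template <class T>
      static const char* Name() { return Aux<T>::name(); }  // delegate
    };
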
@@ -1145,46 +1074,40 @@
 
 // C++ cannot specialise nested templates, so we have to go through this
 // contortion with an auxiliary template to simulate it.
-template<class Config, class T>
+template <class T>
 struct TypeImplIteratorAux {
-  static bool matches(typename TypeImpl<Config>::TypeHandle type);
-  static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type);
+  static bool matches(Type* type);
+  static i::Handle<T> current(Type* type);
 };
 
-template<class Config>
-struct TypeImplIteratorAux<Config, i::Map> {
-  static bool matches(typename TypeImpl<Config>::TypeHandle type) {
-    return type->IsClass();
-  }
-  static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) {
+template <>
+struct TypeImplIteratorAux<i::Map> {
+  static bool matches(Type* type) { return type->IsClass(); }
+  static i::Handle<i::Map> current(Type* type) {
     return type->AsClass()->Map();
   }
 };
 
-template<class Config>
-struct TypeImplIteratorAux<Config, i::Object> {
-  static bool matches(typename TypeImpl<Config>::TypeHandle type) {
-    return type->IsConstant();
-  }
-  static i::Handle<i::Object> current(
-      typename TypeImpl<Config>::TypeHandle type) {
+template <>
+struct TypeImplIteratorAux<i::Object> {
+  static bool matches(Type* type) { return type->IsConstant(); }
+  static i::Handle<i::Object> current(Type* type) {
     return type->AsConstant()->Value();
   }
 };
 
-template<class Config> template<class T>
-bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) {
-  return TypeImplIteratorAux<Config, T>::matches(type);
+template <class T>
+bool Type::Iterator<T>::matches(Type* type) {
+  return TypeImplIteratorAux<T>::matches(type);
 }
 
-template<class Config> template<class T>
-i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() {
-  return TypeImplIteratorAux<Config, T>::current(get_type());
+template <class T>
+i::Handle<T> Type::Iterator<T>::Current() {
+  return TypeImplIteratorAux<T>::current(get_type());
 }
 
-
-template<class Config> template<class T>
-void TypeImpl<Config>::Iterator<T>::Advance() {
+template <class T>
+void Type::Iterator<T>::Advance() {
   DisallowHeapAllocation no_allocation;
   ++index_;
   if (type_->IsUnion()) {
@@ -1199,59 +1122,9 @@
 
 
 // -----------------------------------------------------------------------------
-// Conversion between low-level representations.
-
-template<class Config>
-template<class OtherType>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
-    typename OtherType::TypeHandle type, Region* region) {
-  if (type->IsBitset()) {
-    return BitsetType::New(type->AsBitset(), region);
-  } else if (type->IsClass()) {
-    return ClassType::New(type->AsClass()->Map(), region);
-  } else if (type->IsConstant()) {
-    return ConstantType::New(type->AsConstant()->Value(), region);
-  } else if (type->IsRange()) {
-    return RangeType::New(
-        type->AsRange()->Min(), type->AsRange()->Max(),
-        BitsetType::New(REPRESENTATION(type->BitsetLub()), region), region);
-  } else if (type->IsContext()) {
-    TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
-    return ContextType::New(outer, region);
-  } else if (type->IsUnion()) {
-    int length = type->AsUnion()->Length();
-    UnionHandle unioned = UnionType::New(length, region);
-    for (int i = 0; i < length; ++i) {
-      TypeHandle t = Convert<OtherType>(type->AsUnion()->Get(i), region);
-      unioned->Set(i, t);
-    }
-    return unioned;
-  } else if (type->IsArray()) {
-    TypeHandle element = Convert<OtherType>(type->AsArray()->Element(), region);
-    return ArrayType::New(element, region);
-  } else if (type->IsFunction()) {
-    TypeHandle res = Convert<OtherType>(type->AsFunction()->Result(), region);
-    TypeHandle rcv = Convert<OtherType>(type->AsFunction()->Receiver(), region);
-    FunctionHandle function = FunctionType::New(
-        res, rcv, type->AsFunction()->Arity(), region);
-    for (int i = 0; i < function->Arity(); ++i) {
-      TypeHandle param = Convert<OtherType>(
-          type->AsFunction()->Parameter(i), region);
-      function->InitParameter(i, param);
-    }
-    return function;
-  } else {
-    UNREACHABLE();
-    return None(region);
-  }
-}
-
-
-// -----------------------------------------------------------------------------
 // Printing.
 
-template<class Config>
-const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
+const char* BitsetType::Name(bitset bits) {
   switch (bits) {
     case REPRESENTATION(kAny): return "Any";
     #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
@@ -1270,10 +1143,8 @@
   }
 }
 
-
-template <class Config>
-void TypeImpl<Config>::BitsetType::Print(std::ostream& os,  // NOLINT
-                                         bitset bits) {
+void BitsetType::Print(std::ostream& os,  // NOLINT
+                       bitset bits) {
   DisallowHeapAllocation no_allocation;
   const char* name = Name(bits);
   if (name != NULL) {
@@ -1309,9 +1180,7 @@
   os << ")";
 }
 
-
-template <class Config>
-void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
+void Type::PrintTo(std::ostream& os, PrintDimension dim) {
   DisallowHeapAllocation no_allocation;
   if (dim != REPRESENTATION_DIM) {
     if (this->IsBitset()) {
@@ -1336,7 +1205,7 @@
     } else if (this->IsUnion()) {
       os << "(";
       for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
-        TypeHandle type_i = this->AsUnion()->Get(i);
+        Type* type_i = this->AsUnion()->Get(i);
         if (i > 0) os << " | ";
         type_i->PrintTo(os, dim);
       }
@@ -1357,6 +1226,14 @@
       }
       os << ")->";
       this->AsFunction()->Result()->PrintTo(os, dim);
+    } else if (this->IsTuple()) {
+      os << "<";
+      for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
+        Type* type_i = this->AsTuple()->Element(i);
+        if (i > 0) os << ", ";
+        type_i->PrintTo(os, dim);
+      }
+      os << ">";
     } else {
       UNREACHABLE();
     }
@@ -1369,38 +1246,38 @@
 
 
 #ifdef DEBUG
-template <class Config>
-void TypeImpl<Config>::Print() {
+void Type::Print() {
   OFStream os(stdout);
   PrintTo(os);
   os << std::endl;
 }
-template <class Config>
-void TypeImpl<Config>::BitsetType::Print(bitset bits) {
+void BitsetType::Print(bitset bits) {
   OFStream os(stdout);
   Print(os, bits);
   os << std::endl;
 }
 #endif
 
+BitsetType::bitset BitsetType::SignedSmall() {
+  return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
+}
+
+BitsetType::bitset BitsetType::UnsignedSmall() {
+  return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
+}
+
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+  Type* Type::Name(Isolate* isolate, Zone* zone) {                   \
+    return Class(i::handle(isolate->heap()->name##_map()), zone);    \
+  }
+SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
 
 // -----------------------------------------------------------------------------
 // Instantiations.
 
-template class TypeImpl<ZoneTypeConfig>;
-template class TypeImpl<ZoneTypeConfig>::Iterator<i::Map>;
-template class TypeImpl<ZoneTypeConfig>::Iterator<i::Object>;
-
-template class TypeImpl<HeapTypeConfig>;
-template class TypeImpl<HeapTypeConfig>::Iterator<i::Map>;
-template class TypeImpl<HeapTypeConfig>::Iterator<i::Object>;
-
-template TypeImpl<ZoneTypeConfig>::TypeHandle
-  TypeImpl<ZoneTypeConfig>::Convert<HeapType>(
-    TypeImpl<HeapTypeConfig>::TypeHandle, TypeImpl<ZoneTypeConfig>::Region*);
-template TypeImpl<HeapTypeConfig>::TypeHandle
-  TypeImpl<HeapTypeConfig>::Convert<Type>(
-    TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*);
+template class Type::Iterator<i::Map>;
+template class Type::Iterator<i::Object>;
 
 }  // namespace internal
 }  // namespace v8
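
The types.h diff below drops the stale two-representation paragraph (only the
zone-allocated Type remains) and re-partitions the 32-bit bitset to make room
for UntaggedSimd128 at bit 28: the six existing untagged representation bits
each shift down one position, and the Representation/Semantic masks move
accordingly (0xff800000 -> 0xffc00000, 0x007ffffe -> 0x003ffffe). A quick
sanity check that the new masks stay disjoint and keep bit 0 reserved for
pointer tagging:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kRepresentation = 0xffc00000u;  // after this patch
      const uint32_t kSemantic       = 0x003ffffeu;
      assert((kRepresentation & kSemantic) == 0u);       // axes are disjoint
      // bit 0 stays clear: reserved for the bitset pointer tag.
      assert((kRepresentation | kSemantic) == 0xfffffffeu);
    }
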
diff --git a/src/types.h b/src/types.h
index 9ce650d..9984ad8 100644
--- a/src/types.h
+++ b/src/types.h
@@ -143,14 +143,6 @@
 // bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
 // respective map. Only structured types require allocation.
 // Note that the bitset representation is closed under both Union and Intersect.
-//
-// There are two type representations, using different allocation:
-//
-// - class Type (zone-allocated, for compiler and concurrent compilation)
-// - class HeapType (heap-allocated, for persistent types)
-//
-// Both provide the same API, and the Convert method can be used to interconvert
-// them. For zone types, no query method touches the heap, only constructors do.
 
 
 // -----------------------------------------------------------------------------
@@ -159,20 +151,21 @@
 // clang-format off
 
 #define MASK_BITSET_TYPE_LIST(V) \
-  V(Representation, 0xff800000u) \
-  V(Semantic,       0x007ffffeu)
+  V(Representation, 0xffc00000u) \
+  V(Semantic,       0x003ffffeu)
 
 #define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
 #define SEMANTIC(k)       ((k) & BitsetType::kSemantic)
 
 #define REPRESENTATION_BITSET_TYPE_LIST(V)    \
   V(None,               0)                    \
-  V(UntaggedBit,        1u << 23 | kSemantic) \
-  V(UntaggedIntegral8,  1u << 24 | kSemantic) \
-  V(UntaggedIntegral16, 1u << 25 | kSemantic) \
-  V(UntaggedIntegral32, 1u << 26 | kSemantic) \
-  V(UntaggedFloat32,    1u << 27 | kSemantic) \
-  V(UntaggedFloat64,    1u << 28 | kSemantic) \
+  V(UntaggedBit,        1u << 22 | kSemantic) \
+  V(UntaggedIntegral8,  1u << 23 | kSemantic) \
+  V(UntaggedIntegral16, 1u << 24 | kSemantic) \
+  V(UntaggedIntegral32, 1u << 25 | kSemantic) \
+  V(UntaggedFloat32,    1u << 26 | kSemantic) \
+  V(UntaggedFloat64,    1u << 27 | kSemantic) \
+  V(UntaggedSimd128,    1u << 28 | kSemantic) \
   V(UntaggedPointer,    1u << 29 | kSemantic) \
   V(TaggedSigned,       1u << 30 | kSemantic) \
   V(TaggedPointer,      1u << 31 | kSemantic) \
@@ -267,400 +260,27 @@
   INTERNAL_BITSET_TYPE_LIST(V)       \
   SEMANTIC_BITSET_TYPE_LIST(V)
 
-
-// -----------------------------------------------------------------------------
-// The abstract Type class, parameterized over the low-level representation.
-
-// struct Config {
-//   typedef TypeImpl<Config> Type;
-//   typedef Base;
-//   typedef Struct;
-//   typedef Range;
-//   typedef Region;
-//   template<class> struct Handle { typedef type; }  // No template typedefs...
-//
-//   template<class T> static Handle<T>::type null_handle();
-//   template<class T> static Handle<T>::type handle(T* t);  // !is_bitset(t)
-//   template<class T> static Handle<T>::type cast(Handle<Type>::type);
-//
-//   static bool is_bitset(Type*);
-//   static bool is_class(Type*);
-//   static bool is_struct(Type*, int tag);
-//   static bool is_range(Type*);
-//
-//   static bitset as_bitset(Type*);
-//   static i::Handle<i::Map> as_class(Type*);
-//   static Handle<Struct>::type as_struct(Type*);
-//   static Handle<Range>::type as_range(Type*);
-//
-//   static Type* from_bitset(bitset);
-//   static Handle<Type>::type from_bitset(bitset, Region*);
-//   static Handle<Type>::type from_class(i::Handle<Map>, Region*);
-//   static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
-//   static Handle<Type>::type from_range(Handle<Range>::type);
-//
-//   static Handle<Struct>::type struct_create(int tag, int length, Region*);
-//   static void struct_shrink(Handle<Struct>::type, int length);
-//   static int struct_tag(Handle<Struct>::type);
-//   static int struct_length(Handle<Struct>::type);
-//   static Handle<Type>::type struct_get(Handle<Struct>::type, int);
-//   static void struct_set(Handle<Struct>::type, int, Handle<Type>::type);
-//   template<class V>
-//   static i::Handle<V> struct_get_value(Handle<Struct>::type, int);
-//   template<class V>
-//   static void struct_set_value(Handle<Struct>::type, int, i::Handle<V>);
-//
-//   static Handle<Range>::type range_create(Region*);
-//   static int range_get_bitset(Handle<Range>::type);
-//   static void range_set_bitset(Handle<Range>::type, int);
-//   static double range_get_double(Handle<Range>::type, int);
-//   static void range_set_double(Handle<Range>::type, int, double, Region*);
-// }
-template<class Config>
-class TypeImpl : public Config::Base {
- public:
-  // Auxiliary types.
-
-  typedef uint32_t bitset;  // Internal
-  class BitsetType;         // Internal
-  class StructuralType;     // Internal
-  class UnionType;          // Internal
-
-  class ClassType;
-  class ConstantType;
-  class RangeType;
-  class ContextType;
-  class ArrayType;
-  class FunctionType;
-
-  typedef typename Config::template Handle<TypeImpl>::type TypeHandle;
-  typedef typename Config::template Handle<ClassType>::type ClassHandle;
-  typedef typename Config::template Handle<ConstantType>::type ConstantHandle;
-  typedef typename Config::template Handle<RangeType>::type RangeHandle;
-  typedef typename Config::template Handle<ContextType>::type ContextHandle;
-  typedef typename Config::template Handle<ArrayType>::type ArrayHandle;
-  typedef typename Config::template Handle<FunctionType>::type FunctionHandle;
-  typedef typename Config::template Handle<UnionType>::type UnionHandle;
-  typedef typename Config::Region Region;
-
-  // Constructors.
-
-  #define DEFINE_TYPE_CONSTRUCTOR(type, value)                                \
-    static TypeImpl* type() {                                                 \
-      return BitsetType::New(BitsetType::k##type);                            \
-    }                                                                         \
-    static TypeHandle type(Region* region) {                                  \
-      return BitsetType::New(BitsetType::k##type, region);                    \
-    }
-  PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
-  #undef DEFINE_TYPE_CONSTRUCTOR
-
-  static TypeImpl* SignedSmall() {
-    return BitsetType::New(BitsetType::SignedSmall());
-  }
-  static TypeHandle SignedSmall(Region* region) {
-    return BitsetType::New(BitsetType::SignedSmall(), region);
-  }
-  static TypeImpl* UnsignedSmall() {
-    return BitsetType::New(BitsetType::UnsignedSmall());
-  }
-  static TypeHandle UnsignedSmall(Region* region) {
-    return BitsetType::New(BitsetType::UnsignedSmall(), region);
-  }
-
-  static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
-    return ClassType::New(map, region);
-  }
-  static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
-    return ConstantType::New(value, region);
-  }
-  static TypeHandle Range(double min, double max, Region* region) {
-    return RangeType::New(
-        min, max, BitsetType::New(REPRESENTATION(BitsetType::kTagged |
-                                                 BitsetType::kUntaggedNumber),
-                                  region),
-        region);
-  }
-  static TypeHandle Context(TypeHandle outer, Region* region) {
-    return ContextType::New(outer, region);
-  }
-  static TypeHandle Array(TypeHandle element, Region* region) {
-    return ArrayType::New(element, region);
-  }
-  static FunctionHandle Function(
-      TypeHandle result, TypeHandle receiver, int arity, Region* region) {
-    return FunctionType::New(result, receiver, arity, region);
-  }
-  static TypeHandle Function(TypeHandle result, Region* region) {
-    return Function(result, Any(region), 0, region);
-  }
-  static TypeHandle Function(
-      TypeHandle result, TypeHandle param0, Region* region) {
-    FunctionHandle function = Function(result, Any(region), 1, region);
-    function->InitParameter(0, param0);
-    return function;
-  }
-  static TypeHandle Function(
-      TypeHandle result, TypeHandle param0, TypeHandle param1, Region* region) {
-    FunctionHandle function = Function(result, Any(region), 2, region);
-    function->InitParameter(0, param0);
-    function->InitParameter(1, param1);
-    return function;
-  }
-  static TypeHandle Function(
-      TypeHandle result, TypeHandle param0, TypeHandle param1,
-      TypeHandle param2, Region* region) {
-    FunctionHandle function = Function(result, Any(region), 3, region);
-    function->InitParameter(0, param0);
-    function->InitParameter(1, param1);
-    function->InitParameter(2, param2);
-    return function;
-  }
-  static TypeHandle Function(TypeHandle result, int arity, TypeHandle* params,
-                             Region* region) {
-    FunctionHandle function = Function(result, Any(region), arity, region);
-    for (int i = 0; i < arity; ++i) {
-      function->InitParameter(i, params[i]);
-    }
-    return function;
-  }
-
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  static TypeHandle Name(Isolate* isolate, Region* region);
-  SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
-  static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
-  static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
-
-  static TypeHandle Of(double value, Region* region) {
-    return Config::from_bitset(BitsetType::ExpandInternals(
-        BitsetType::Lub(value)), region);
-  }
-  static TypeHandle Of(i::Object* value, Region* region) {
-    return Config::from_bitset(BitsetType::ExpandInternals(
-        BitsetType::Lub(value)), region);
-  }
-  static TypeHandle Of(i::Handle<i::Object> value, Region* region) {
-    return Of(*value, region);
-  }
-
-  // Extraction of components.
-  static TypeHandle Representation(TypeHandle t, Region* region);
-  static TypeHandle Semantic(TypeHandle t, Region* region);
-
-  // Predicates.
-  bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
-
-  bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); }
-  template<class TypeHandle>
-  bool Is(TypeHandle that) { return this->Is(*that); }
-
-  bool Maybe(TypeImpl* that);
-  template<class TypeHandle>
-  bool Maybe(TypeHandle that) { return this->Maybe(*that); }
-
-  bool Equals(TypeImpl* that) { return this->Is(that) && that->Is(this); }
-  template<class TypeHandle>
-  bool Equals(TypeHandle that) { return this->Equals(*that); }
-
-  // Equivalent to Constant(val)->Is(this), but avoiding allocation.
-  bool Contains(i::Object* val);
-  bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
-
-  // State-dependent versions of the above that consider subtyping between
-  // a constant and its map class.
-  inline static TypeHandle NowOf(i::Object* value, Region* region);
-  static TypeHandle NowOf(i::Handle<i::Object> value, Region* region) {
-    return NowOf(*value, region);
-  }
-  bool NowIs(TypeImpl* that);
-  template<class TypeHandle>
-  bool NowIs(TypeHandle that)  { return this->NowIs(*that); }
-  inline bool NowContains(i::Object* val);
-  bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
-
-  bool NowStable();
-
-  // Inspection.
-
-  bool IsRange() { return Config::is_range(this); }
-  bool IsClass() {
-    return Config::is_class(this)
-        || Config::is_struct(this, StructuralType::kClassTag);
-  }
-  bool IsConstant() {
-    return Config::is_struct(this, StructuralType::kConstantTag);
-  }
-  bool IsContext() {
-    return Config::is_struct(this, StructuralType::kContextTag);
-  }
-  bool IsArray() {
-    return Config::is_struct(this, StructuralType::kArrayTag);
-  }
-  bool IsFunction() {
-    return Config::is_struct(this, StructuralType::kFunctionTag);
-  }
-
-  ClassType* AsClass() { return ClassType::cast(this); }
-  ConstantType* AsConstant() { return ConstantType::cast(this); }
-  RangeType* AsRange() { return RangeType::cast(this); }
-  ContextType* AsContext() { return ContextType::cast(this); }
-  ArrayType* AsArray() { return ArrayType::cast(this); }
-  FunctionType* AsFunction() { return FunctionType::cast(this); }
-
-  // Minimum and maximum of a numeric type.
-  // These functions do not distinguish between -0 and +0.  If the type equals
-  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
-  // functions on subtypes of Number.
-  double Min();
-  double Max();
-
-  // Extracts a range from the type: if the type is a range or a union
-  // containing a range, that range is returned; otherwise, NULL is returned.
-  RangeType* GetRange();
-
-  static bool IsInteger(double x) {
-    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
-  }
-  static bool IsInteger(i::Object* x) {
-    return x->IsNumber() && IsInteger(x->Number());
-  }
-
-  int NumClasses();
-  int NumConstants();
-
-  template<class T> class Iterator;
-  Iterator<i::Map> Classes() {
-    if (this->IsBitset()) return Iterator<i::Map>();
-    return Iterator<i::Map>(Config::handle(this));
-  }
-  Iterator<i::Object> Constants() {
-    if (this->IsBitset()) return Iterator<i::Object>();
-    return Iterator<i::Object>(Config::handle(this));
-  }
-
-  // Casting and conversion.
-
-  static inline TypeImpl* cast(typename Config::Base* object);
-
-  template<class OtherTypeImpl>
-  static TypeHandle Convert(
-      typename OtherTypeImpl::TypeHandle type, Region* region);
-
-  // Printing.
-
-  enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
-
-  void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS);  // NOLINT
-
-#ifdef DEBUG
-  void Print();
-#endif
-
-  bool IsUnionForTesting() { return IsUnion(); }
-
- protected:
-  // Friends.
-
-  template<class> friend class Iterator;
-  template<class> friend class TypeImpl;
-
-  // Handle conversion.
-
-  template<class T>
-  static typename Config::template Handle<T>::type handle(T* type) {
-    return Config::handle(type);
-  }
-  TypeImpl* unhandle() { return this; }
-
-  // Internal inspection.
-
-  bool IsNone() { return this == None(); }
-  bool IsAny() { return this == Any(); }
-  bool IsBitset() { return Config::is_bitset(this); }
-  bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); }
-
-  bitset AsBitset() {
-    DCHECK(this->IsBitset());
-    return static_cast<BitsetType*>(this)->Bitset();
-  }
-  UnionType* AsUnion() { return UnionType::cast(this); }
-
-  bitset Representation();
-
-  // Auxiliary functions.
-  bool SemanticMaybe(TypeImpl* that);
-
-  bitset BitsetGlb() { return BitsetType::Glb(this); }
-  bitset BitsetLub() { return BitsetType::Lub(this); }
-
-  bool SlowIs(TypeImpl* that);
-  bool SemanticIs(TypeImpl* that);
-
-  struct Limits {
-    double min;
-    double max;
-    Limits(double min, double max) : min(min), max(max) {}
-    explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
-    bool IsEmpty();
-    static Limits Empty() { return Limits(1, 0); }
-    static Limits Intersect(Limits lhs, Limits rhs);
-    static Limits Union(Limits lhs, Limits rhs);
-  };
-
-  static bool Overlap(RangeType* lhs, RangeType* rhs);
-  static bool Contains(RangeType* lhs, RangeType* rhs);
-  static bool Contains(RangeType* range, ConstantType* constant);
-  static bool Contains(RangeType* range, i::Object* val);
-
-  static int UpdateRange(
-      RangeHandle type, UnionHandle result, int size, Region* region);
-
-  static Limits IntersectRangeAndBitset(TypeHandle range, TypeHandle bits,
-                                        Region* region);
-  static Limits ToLimits(bitset bits, Region* region);
-
-  bool SimplyEquals(TypeImpl* that);
-  template<class TypeHandle>
-  bool SimplyEquals(TypeHandle that) { return this->SimplyEquals(*that); }
-
-  static int AddToUnion(
-      TypeHandle type, UnionHandle result, int size, Region* region);
-  static int IntersectAux(TypeHandle type, TypeHandle other, UnionHandle result,
-                          int size, Limits* limits, Region* region);
-  static TypeHandle NormalizeUnion(UnionHandle unioned, int size,
-                                   Region* region);
-  static TypeHandle NormalizeRangeAndBitset(RangeHandle range, bitset* bits,
-                                            Region* region);
-};
-
+class Type;
 
 // -----------------------------------------------------------------------------
 // Bitset types (internal).
 
-template<class Config>
-class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
- protected:
-  friend class TypeImpl<Config>;
+class BitsetType {
+ public:
+  typedef uint32_t bitset;  // Internal
 
   enum : uint32_t {
-    #define DECLARE_TYPE(type, value) k##type = (value),
+#define DECLARE_TYPE(type, value) k##type = (value),
     BITSET_TYPE_LIST(DECLARE_TYPE)
-    #undef DECLARE_TYPE
-    kUnusedEOL = 0
+#undef DECLARE_TYPE
+        kUnusedEOL = 0
   };
 
   static bitset SignedSmall();
   static bitset UnsignedSmall();
 
-  bitset Bitset() { return Config::as_bitset(this); }
-
-  static TypeImpl* New(bitset bits) {
-    return Config::from_bitset(bits);
-  }
-  static TypeHandle New(bitset bits, Region* region) {
-    return Config::from_bitset(bits, region);
+  bitset Bitset() {
+    return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
   }
 
   static bool IsInhabited(bitset bits) {
@@ -678,9 +298,9 @@
   static double Min(bitset);
   static double Max(bitset);
 
-  static bitset Glb(TypeImpl* type);  // greatest lower bound that's a bitset
+  static bitset Glb(Type* type);  // greatest lower bound that's a bitset
   static bitset Glb(double min, double max);
-  static bitset Lub(TypeImpl* type);  // least upper bound that's a bitset
+  static bitset Lub(Type* type);  // least upper bound that's a bitset
   static bitset Lub(i::Map* map);
   static bitset Lub(i::Object* value);
   static bitset Lub(double value);
@@ -695,7 +315,19 @@
 
   static bitset NumberBits(bitset bits);
 
+  static bool IsBitset(Type* type) {
+    return reinterpret_cast<uintptr_t>(type) & 1;
+  }
+
+  static Type* NewForTesting(bitset bits) { return New(bits); }
+
  private:
+  friend class Type;
+
+  static Type* New(bitset bits) {
+    return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
+  }
+
   struct Boundary {
     bitset internal;
     bitset external;
@@ -706,57 +338,302 @@
   static inline size_t BoundariesSize();
 };
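+// Example (an illustrative sketch, not part of the API surface): bitset
+// types are encoded directly in the Type* value, with the low bit as tag,
+// so they need no allocation:
+//
+//   Type* t = Type::Number();               // an odd, tagged "pointer"
+//   CHECK(BitsetType::IsBitset(t));
+//   BitsetType::bitset bits =
+//       reinterpret_cast<BitsetType*>(t)->Bitset();  // strips the tag bit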
 
-
 // -----------------------------------------------------------------------------
 // Superclass for non-bitset types (internal).
-// Contains a tag and a variable number of type or value fields.
-
-template<class Config>
-class TypeImpl<Config>::StructuralType : public TypeImpl<Config> {
+class TypeBase {
  protected:
-  template<class> friend class TypeImpl;
-  friend struct ZoneTypeConfig;  // For tags.
-  friend struct HeapTypeConfig;
+  friend class Type;
 
-  enum Tag {
-    kClassTag,
-    kConstantTag,
-    kContextTag,
-    kArrayTag,
-    kFunctionTag,
-    kUnionTag
+  enum Kind {
+    kClass,
+    kConstant,
+    kContext,
+    kArray,
+    kFunction,
+    kTuple,
+    kUnion,
+    kRange
   };
 
-  int Length() {
-    return Config::struct_length(Config::as_struct(this));
-  }
-  TypeHandle Get(int i) {
-    DCHECK(0 <= i && i < this->Length());
-    return Config::struct_get(Config::as_struct(this), i);
-  }
-  void Set(int i, TypeHandle type) {
-    DCHECK(0 <= i && i < this->Length());
-    Config::struct_set(Config::as_struct(this), i, type);
-  }
-  void Shrink(int length) {
-    DCHECK(2 <= length && length <= this->Length());
-    Config::struct_shrink(Config::as_struct(this), length);
-  }
-  template<class V> i::Handle<V> GetValue(int i) {
-    DCHECK(0 <= i && i < this->Length());
-    return Config::template struct_get_value<V>(Config::as_struct(this), i);
-  }
-  template<class V> void SetValue(int i, i::Handle<V> x) {
-    DCHECK(0 <= i && i < this->Length());
-    Config::struct_set_value(Config::as_struct(this), i, x);
+  Kind kind() const { return kind_; }
+  explicit TypeBase(Kind kind) : kind_(kind) {}
+
+  static bool IsKind(Type* type, Kind kind) {
+    if (BitsetType::IsBitset(type)) return false;
+    TypeBase* base = reinterpret_cast<TypeBase*>(type);
+    return base->kind() == kind;
   }
 
-  static TypeHandle New(Tag tag, int length, Region* region) {
-    DCHECK(1 <= length);
-    return Config::from_struct(Config::struct_create(tag, length, region));
+  // The hacky conversion to/from Type*: a Type* is either a tagged bitset
+  // (odd value) or a genuine pointer to a TypeBase subclass, so these
+  // casts are only meaningful after a BitsetType::IsBitset() check.
+  static Type* AsType(TypeBase* type) { return reinterpret_cast<Type*>(type); }
+  static TypeBase* FromType(Type* type) {
+    return reinterpret_cast<TypeBase*>(type);
+  }
+
+ private:
+  Kind kind_;
+};
+
+// -----------------------------------------------------------------------------
+// Class types.
+
+class ClassType : public TypeBase {
+ public:
+  i::Handle<i::Map> Map() { return map_; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+
+  static Type* New(i::Handle<i::Map> map, Zone* zone) {
+    return AsType(new (zone->New(sizeof(ClassType)))
+                      ClassType(BitsetType::Lub(*map), map));
+  }
+
+  static ClassType* cast(Type* type) {
+    DCHECK(IsKind(type, kClass));
+    return static_cast<ClassType*>(FromType(type));
+  }
+
+  ClassType(BitsetType::bitset bitset, i::Handle<i::Map> map)
+      : TypeBase(kClass), bitset_(bitset), map_(map) {}
+
+  BitsetType::bitset Lub() { return bitset_; }
+
+  BitsetType::bitset bitset_;
+  Handle<i::Map> map_;
+};
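+// Note: a ClassType caches BitsetType::Lub(*map) at construction, so taking
+// the bitset bound of a class type never dereferences the map again.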
+
+// -----------------------------------------------------------------------------
+// Constant types.
+
+class ConstantType : public TypeBase {
+ public:
+  i::Handle<i::Object> Value() { return object_; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+
+  static Type* New(i::Handle<i::Object> value, Zone* zone) {
+    BitsetType::bitset bitset = BitsetType::Lub(*value);
+    return AsType(new (zone->New(sizeof(ConstantType)))
+                      ConstantType(bitset, value));
+  }
+
+  static ConstantType* cast(Type* type) {
+    DCHECK(IsKind(type, kConstant));
+    return static_cast<ConstantType*>(FromType(type));
+  }
+
+  ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
+      : TypeBase(kConstant), bitset_(bitset), object_(object) {}
+
+  BitsetType::bitset Lub() { return bitset_; }
+
+  BitsetType::bitset bitset_;
+  Handle<i::Object> object_;
+};
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
+
+// -----------------------------------------------------------------------------
+// Range types.
+
+class RangeType : public TypeBase {
+ public:
+  struct Limits {
+    double min;
+    double max;
+    Limits(double min, double max) : min(min), max(max) {}
+    explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
+    bool IsEmpty();
+    static Limits Empty() { return Limits(1, 0); }
+    static Limits Intersect(Limits lhs, Limits rhs);
+    static Limits Union(Limits lhs, Limits rhs);
+  };
+
+  double Min() { return limits_.min; }
+  double Max() { return limits_.max; }
+
+ private:
+  friend class Type;
+  friend class BitsetType;
+  friend class UnionType;
+
+  static Type* New(double min, double max, BitsetType::bitset representation,
+                   Zone* zone) {
+    return New(Limits(min, max), representation, zone);
+  }
+
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+
+  static Type* New(Limits lim, BitsetType::bitset representation, Zone* zone) {
+    DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
+    DCHECK(lim.min <= lim.max);
+    DCHECK(REPRESENTATION(representation) == representation);
+    BitsetType::bitset bits =
+        SEMANTIC(BitsetType::Lub(lim.min, lim.max)) | representation;
+
+    return AsType(new (zone->New(sizeof(RangeType))) RangeType(bits, lim));
+  }
+
+  static RangeType* cast(Type* type) {
+    DCHECK(IsKind(type, kRange));
+    return static_cast<RangeType*>(FromType(type));
+  }
+
+  RangeType(BitsetType::bitset bitset, Limits limits)
+      : TypeBase(kRange), bitset_(bitset), limits_(limits) {}
+
+  BitsetType::bitset Lub() { return bitset_; }
+
+  BitsetType::bitset bitset_;
+  Limits limits_;
+};
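+// Example (illustrative): Limits encodes the empty range as min > max,
+// which is why Limits::Empty() is (1, 0); intersecting disjoint ranges,
+// e.g. Limits::Intersect(Limits(0, 10), Limits(20, 30)), is expected to
+// produce such a pair, which IsEmpty() then reports as empty.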
+
+// -----------------------------------------------------------------------------
+// Context types.
+
+class ContextType : public TypeBase {
+ public:
+  Type* Outer() { return outer_; }
+
+ private:
+  friend class Type;
+
+  static Type* New(Type* outer, Zone* zone) {
+    return AsType(new (zone->New(sizeof(ContextType))) ContextType(outer));
+  }
+
+  static ContextType* cast(Type* type) {
+    DCHECK(IsKind(type, kContext));
+    return static_cast<ContextType*>(FromType(type));
+  }
+
+  explicit ContextType(Type* outer) : TypeBase(kContext), outer_(outer) {}
+
+  Type* outer_;
+};
+
+// -----------------------------------------------------------------------------
+// Array types.
+
+class ArrayType : public TypeBase {
+ public:
+  Type* Element() { return element_; }
+
+ private:
+  friend class Type;
+
+  explicit ArrayType(Type* element) : TypeBase(kArray), element_(element) {}
+
+  static Type* New(Type* element, Zone* zone) {
+    return AsType(new (zone->New(sizeof(ArrayType))) ArrayType(element));
+  }
+
+  static ArrayType* cast(Type* type) {
+    DCHECK(IsKind(type, kArray));
+    return static_cast<ArrayType*>(FromType(type));
+  }
+
+  Type* element_;
+};
+
+// -----------------------------------------------------------------------------
+// Superclass for types with variable number of type fields.
+class StructuralType : public TypeBase {
+ public:
+  int LengthForTesting() { return Length(); }
+
+ protected:
+  friend class Type;
+
+  int Length() { return length_; }
+
+  Type* Get(int i) {
+    DCHECK(0 <= i && i < this->Length());
+    return elements_[i];
+  }
+
+  void Set(int i, Type* type) {
+    DCHECK(0 <= i && i < this->Length());
+    elements_[i] = type;
+  }
+
+  void Shrink(int length) {
+    DCHECK(2 <= length && length <= this->Length());
+    length_ = length;
+  }
+
+  StructuralType(Kind kind, int length, i::Zone* zone)
+      : TypeBase(kind), length_(length) {
+    elements_ = reinterpret_cast<Type**>(zone->New(sizeof(Type*) * length));
+  }
+
+ private:
+  int length_;
+  Type** elements_;
+};
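+// Note (assumption): like the type objects themselves, the elements_ array
+// is zone-allocated and never explicitly freed; its memory is reclaimed
+// only when the owning Zone dies, so no destructors run for these classes.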
+
+// -----------------------------------------------------------------------------
+// Function types.
+
+class FunctionType : public StructuralType {
+ public:
+  int Arity() { return this->Length() - 2; }
+  Type* Result() { return this->Get(0); }
+  Type* Receiver() { return this->Get(1); }
+  Type* Parameter(int i) { return this->Get(2 + i); }
+
+  void InitParameter(int i, Type* type) { this->Set(2 + i, type); }
+
+ private:
+  friend class Type;
+
+  FunctionType(Type* result, Type* receiver, int arity, Zone* zone)
+      : StructuralType(kFunction, 2 + arity, zone) {
+    Set(0, result);
+    Set(1, receiver);
+  }
+
+  static Type* New(Type* result, Type* receiver, int arity, Zone* zone) {
+    return AsType(new (zone->New(sizeof(FunctionType)))
+                      FunctionType(result, receiver, arity, zone));
+  }
+
+  static FunctionType* cast(Type* type) {
+    DCHECK(IsKind(type, kFunction));
+    return static_cast<FunctionType*>(FromType(type));
   }
 };
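+// Example (an illustrative sketch, assuming a live Zone* zone): the element
+// array of a FunctionType is laid out as [result, receiver, param0, ...]:
+//
+//   Type* f = Type::Function(Type::Number(), Type::String(), zone);
+//   // f->AsFunction()->Result() == Type::Number(), Receiver() == Any(),
+//   // Arity() == 1, and Parameter(0) == Type::String().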
 
+// -----------------------------------------------------------------------------
+// Tuple types.
+
+class TupleType : public StructuralType {
+ public:
+  int Arity() { return this->Length(); }
+  Type* Element(int i) { return this->Get(i); }
+
+  void InitElement(int i, Type* type) { this->Set(i, type); }
+
+ private:
+  friend class Type;
+
+  TupleType(int length, Zone* zone) : StructuralType(kTuple, length, zone) {}
+
+  static Type* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(TupleType))) TupleType(length, zone));
+  }
+
+  static TupleType* cast(Type* type) {
+    DCHECK(IsKind(type, kTuple));
+    return static_cast<TupleType*>(FromType(type));
+  }
+};
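+// Note: TupleType supports arbitrary lengths, but Type::Tuple() below only
+// constructs the 3-element shape, the one form built in this header.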
 
 // -----------------------------------------------------------------------------
 // Union types (internal).
@@ -765,420 +642,329 @@
 // - at most one field is a bitset, and it must go into index 0
 // - no field is a union
 // - no field is a subtype of any other field
-template<class Config>
-class TypeImpl<Config>::UnionType : public StructuralType {
- public:
-  static UnionHandle New(int length, Region* region) {
-    return Config::template cast<UnionType>(
-        StructuralType::New(StructuralType::kUnionTag, length, region));
+class UnionType : public StructuralType {
+ private:
+  friend Type;
+  friend BitsetType;
+
+  UnionType(int length, Zone* zone) : StructuralType(kUnion, length, zone) {}
+
+  static Type* New(int length, Zone* zone) {
+    return AsType(new (zone->New(sizeof(UnionType))) UnionType(length, zone));
   }
 
-  static UnionType* cast(TypeImpl* type) {
-    DCHECK(type->IsUnion());
-    return static_cast<UnionType*>(type);
+  static UnionType* cast(Type* type) {
+    DCHECK(IsKind(type, kUnion));
+    return static_cast<UnionType*>(FromType(type));
   }
 
   bool Wellformed();
 };
 
-
-// -----------------------------------------------------------------------------
-// Class types.
-
-template<class Config>
-class TypeImpl<Config>::ClassType : public StructuralType {
+class Type {
  public:
-  i::Handle<i::Map> Map() {
-    return Config::is_class(this) ? Config::as_class(this) :
-        this->template GetValue<i::Map>(1);
+  typedef BitsetType::bitset bitset;  // Internal
+
+// Constructors.
+#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+  static Type* type() { return BitsetType::New(BitsetType::k##type); }
+  PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+#undef DEFINE_TYPE_CONSTRUCTOR
+
+  static Type* SignedSmall() {
+    return BitsetType::New(BitsetType::SignedSmall());
+  }
+  static Type* UnsignedSmall() {
+    return BitsetType::New(BitsetType::UnsignedSmall());
   }
 
-  static ClassHandle New(i::Handle<i::Map> map, Region* region) {
-    ClassHandle type =
-        Config::template cast<ClassType>(Config::from_class(map, region));
-    if (!type->IsClass()) {
-      type = Config::template cast<ClassType>(
-          StructuralType::New(StructuralType::kClassTag, 2, region));
-      type->Set(0, BitsetType::New(BitsetType::Lub(*map), region));
-      type->SetValue(1, map);
+  static Type* Class(i::Handle<i::Map> map, Zone* zone) {
+    return ClassType::New(map, zone);
+  }
+  static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
+    return ConstantType::New(value, zone);
+  }
+  static Type* Range(double min, double max, Zone* zone) {
+    return RangeType::New(min, max, REPRESENTATION(BitsetType::kTagged |
+                                                   BitsetType::kUntaggedNumber),
+                          zone);
+  }
+  static Type* Context(Type* outer, Zone* zone) {
+    return ContextType::New(outer, zone);
+  }
+  static Type* Array(Type* element, Zone* zone) {
+    return ArrayType::New(element, zone);
+  }
+  static Type* Function(Type* result, Type* receiver, int arity, Zone* zone) {
+    return FunctionType::New(result, receiver, arity, zone);
+  }
+  static Type* Function(Type* result, Zone* zone) {
+    return Function(result, Any(), 0, zone);
+  }
+  static Type* Function(Type* result, Type* param0, Zone* zone) {
+    Type* function = Function(result, Any(), 1, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    return function;
+  }
+  static Type* Function(Type* result, Type* param0, Type* param1, Zone* zone) {
+    Type* function = Function(result, Any(), 2, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    function->AsFunction()->InitParameter(1, param1);
+    return function;
+  }
+  static Type* Function(Type* result, Type* param0, Type* param1, Type* param2,
+                        Zone* zone) {
+    Type* function = Function(result, Any(), 3, zone);
+    function->AsFunction()->InitParameter(0, param0);
+    function->AsFunction()->InitParameter(1, param1);
+    function->AsFunction()->InitParameter(2, param2);
+    return function;
+  }
+  static Type* Function(Type* result, int arity, Type** params, Zone* zone) {
+    Type* function = Function(result, Any(), arity, zone);
+    for (int i = 0; i < arity; ++i) {
+      function->AsFunction()->InitParameter(i, params[i]);
     }
-    return type;
+    return function;
+  }
+  static Type* Tuple(Type* first, Type* second, Type* third, Zone* zone) {
+    Type* tuple = TupleType::New(3, zone);
+    tuple->AsTuple()->InitElement(0, first);
+    tuple->AsTuple()->InitElement(1, second);
+    tuple->AsTuple()->InitElement(2, third);
+    return tuple;
   }
 
-  static ClassType* cast(TypeImpl* type) {
-    DCHECK(type->IsClass());
-    return static_cast<ClassType*>(type);
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+  static Type* Name(Isolate* isolate, Zone* zone);
+  SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
+
+  static Type* Union(Type* type1, Type* type2, Zone* zone);
+  static Type* Intersect(Type* type1, Type* type2, Zone* zone);
+
+  static Type* Of(double value, Zone* zone) {
+    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+  }
+  static Type* Of(i::Object* value, Zone* zone) {
+    return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+  }
+  static Type* Of(i::Handle<i::Object> value, Zone* zone) {
+    return Of(*value, zone);
   }
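+  // Example (illustrative): Of() yields the least bitset type containing
+  // the value (always a bitset, never a ConstantType), so for instance
+  // Type::Of(0.5, zone)->Is(Type::Number()) holds.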
 
- private:
-  template<class> friend class TypeImpl;
-  bitset Lub() {
-    return Config::is_class(this) ?
-        BitsetType::Lub(*Config::as_class(this)) :
-        this->Get(0)->AsBitset();
+  // Extraction of components.
+  static Type* Representation(Type* t, Zone* zone);
+  static Type* Semantic(Type* t, Zone* zone);
+
+  // Predicates.
+  bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
+
+  bool Is(Type* that) { return this == that || this->SlowIs(that); }
+  bool Maybe(Type* that);
+  bool Equals(Type* that) { return this->Is(that) && that->Is(this); }
+
+  // Equivalent to Constant(val)->Is(this), but avoiding allocation.
+  bool Contains(i::Object* val);
+  bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
+
+  // State-dependent versions of the above that consider subtyping between
+  // a constant and its map class.
+  static Type* NowOf(i::Object* value, Zone* zone);
+  static Type* NowOf(i::Handle<i::Object> value, Zone* zone) {
+    return NowOf(*value, zone);
   }
-};
+  bool NowIs(Type* that);
+  bool NowContains(i::Object* val);
+  bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
 
+  bool NowStable();
 
-// -----------------------------------------------------------------------------
-// Constant types.
+  // Inspection.
+  bool IsRange() { return IsKind(TypeBase::kRange); }
+  bool IsClass() { return IsKind(TypeBase::kClass); }
+  bool IsConstant() { return IsKind(TypeBase::kConstant); }
+  bool IsContext() { return IsKind(TypeBase::kContext); }
+  bool IsArray() { return IsKind(TypeBase::kArray); }
+  bool IsFunction() { return IsKind(TypeBase::kFunction); }
+  bool IsTuple() { return IsKind(TypeBase::kTuple); }
 
-template<class Config>
-class TypeImpl<Config>::ConstantType : public StructuralType {
- public:
-  i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
+  ClassType* AsClass() { return ClassType::cast(this); }
+  ConstantType* AsConstant() { return ConstantType::cast(this); }
+  RangeType* AsRange() { return RangeType::cast(this); }
+  ContextType* AsContext() { return ContextType::cast(this); }
+  ArrayType* AsArray() { return ArrayType::cast(this); }
+  FunctionType* AsFunction() { return FunctionType::cast(this); }
+  TupleType* AsTuple() { return TupleType::cast(this); }
 
-  static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
-    ConstantHandle type = Config::template cast<ConstantType>(
-        StructuralType::New(StructuralType::kConstantTag, 2, region));
-    type->Set(0, BitsetType::New(BitsetType::Lub(*value), region));
-    type->SetValue(1, value);
-    return type;
+  // Minimum and maximum of a numeric type.
+  // These functions do not distinguish between -0 and +0.  If the type equals
+  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
+  // functions on subtypes of Number.
+  double Min();
+  double Max();
+
+  // Extracts a range from the type: if the type is a range or a union
+  // containing a range, that range is returned; otherwise, NULL is returned.
+  Type* GetRange();
+
+  static bool IsInteger(i::Object* x);
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
   }
 
-  static ConstantType* cast(TypeImpl* type) {
-    DCHECK(type->IsConstant());
-    return static_cast<ConstantType*>(type);
-  }
+  int NumClasses();
+  int NumConstants();
 
- private:
-  template<class> friend class TypeImpl;
-  bitset Lub() { return this->Get(0)->AsBitset(); }
-};
-// TODO(neis): Also cache value if numerical.
-// TODO(neis): Allow restricting the representation.
+  template <class T>
+  class Iterator {
+   public:
+    bool Done() const { return index_ < 0; }
+    i::Handle<T> Current();
+    void Advance();
 
+   private:
+    friend class Type;
 
-// -----------------------------------------------------------------------------
-// Range types.
+    Iterator() : index_(-1) {}
+    explicit Iterator(Type* type) : type_(type), index_(-1) { Advance(); }
 
-template <class Config>
-class TypeImpl<Config>::RangeType : public TypeImpl<Config> {
- public:
-  double Min() { return Config::range_get_double(Config::as_range(this), 0); }
-  double Max() { return Config::range_get_double(Config::as_range(this), 1); }
+    inline bool matches(Type* type);
+    inline Type* get_type();
 
-  static RangeHandle New(double min, double max, TypeHandle representation,
-                         Region* region) {
-    DCHECK(IsInteger(min) && IsInteger(max));
-    DCHECK(min <= max);
-    bitset representation_bits = representation->AsBitset();
-    DCHECK(REPRESENTATION(representation_bits) == representation_bits);
-
-    typename Config::template Handle<typename Config::Range>::type range =
-        Config::range_create(region);
-
-    bitset bits = SEMANTIC(BitsetType::Lub(min, max)) | representation_bits;
-    Config::range_set_bitset(range, bits);
-    Config::range_set_double(range, 0, min, region);
-    Config::range_set_double(range, 1, max, region);
-    return Config::template cast<RangeType>(Config::from_range(range));
-  }
-
-  static RangeHandle New(Limits lim, bitset representation, Region* region) {
-    return New(lim.min, lim.max, BitsetType::New(representation, region),
-               region);
-  }
-
-  static RangeType* cast(TypeImpl* type) {
-    DCHECK(type->IsRange());
-    return static_cast<RangeType*>(type);
-  }
-
- private:
-  template<class> friend class TypeImpl;
-  bitset Lub() {
-    return Config::range_get_bitset(Config::as_range(this));
-  }
-};
-
-
-// -----------------------------------------------------------------------------
-// Context types.
-
-template<class Config>
-class TypeImpl<Config>::ContextType : public StructuralType {
- public:
-  TypeHandle Outer() { return this->Get(0); }
-
-  static ContextHandle New(TypeHandle outer, Region* region) {
-    ContextHandle type = Config::template cast<ContextType>(
-        StructuralType::New(StructuralType::kContextTag, 1, region));
-    type->Set(0, outer);
-    return type;
-  }
-
-  static ContextType* cast(TypeImpl* type) {
-    DCHECK(type->IsContext());
-    return static_cast<ContextType*>(type);
-  }
-};
-
-
-// -----------------------------------------------------------------------------
-// Array types.
-
-template<class Config>
-class TypeImpl<Config>::ArrayType : public StructuralType {
- public:
-  TypeHandle Element() { return this->Get(0); }
-
-  static ArrayHandle New(TypeHandle element, Region* region) {
-    ArrayHandle type = Config::template cast<ArrayType>(
-        StructuralType::New(StructuralType::kArrayTag, 1, region));
-    type->Set(0, element);
-    return type;
-  }
-
-  static ArrayType* cast(TypeImpl* type) {
-    DCHECK(type->IsArray());
-    return static_cast<ArrayType*>(type);
-  }
-};
-
-
-// -----------------------------------------------------------------------------
-// Function types.
-
-template<class Config>
-class TypeImpl<Config>::FunctionType : public StructuralType {
- public:
-  int Arity() { return this->Length() - 2; }
-  TypeHandle Result() { return this->Get(0); }
-  TypeHandle Receiver() { return this->Get(1); }
-  TypeHandle Parameter(int i) { return this->Get(2 + i); }
-
-  void InitParameter(int i, TypeHandle type) { this->Set(2 + i, type); }
-
-  static FunctionHandle New(
-      TypeHandle result, TypeHandle receiver, int arity, Region* region) {
-    FunctionHandle type = Config::template cast<FunctionType>(
-        StructuralType::New(StructuralType::kFunctionTag, 2 + arity, region));
-    type->Set(0, result);
-    type->Set(1, receiver);
-    return type;
-  }
-
-  static FunctionType* cast(TypeImpl* type) {
-    DCHECK(type->IsFunction());
-    return static_cast<FunctionType*>(type);
-  }
-};
-
-
-// -----------------------------------------------------------------------------
-// Type iterators.
-
-template<class Config> template<class T>
-class TypeImpl<Config>::Iterator {
- public:
-  bool Done() const { return index_ < 0; }
-  i::Handle<T> Current();
-  void Advance();
-
- private:
-  template<class> friend class TypeImpl;
-
-  Iterator() : index_(-1) {}
-  explicit Iterator(TypeHandle type) : type_(type), index_(-1) {
-    Advance();
-  }
-
-  inline bool matches(TypeHandle type);
-  inline TypeHandle get_type();
-
-  TypeHandle type_;
-  int index_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Zone-allocated types; they are either (odd) integers to represent bitsets, or
-// (even) pointers to structures for everything else.
-
-struct ZoneTypeConfig {
-  typedef TypeImpl<ZoneTypeConfig> Type;
-  class Base {};
-  typedef void* Struct;
-  // Hack: the Struct and Range types can be aliased in memory, the first
-  // pointer word of each both must be the tag (kRangeStructTag for Range,
-  // anything else for Struct) so that we can differentiate them.
-  struct Range {
-    void* tag;
-    int bitset;
-    double limits[2];
+    Type* type_;
+    int index_;
   };
-  typedef i::Zone Region;
-  template<class T> struct Handle { typedef T* type; };
 
-  static const int kRangeStructTag = 0x1000;
-
-  template<class T> static inline T* null_handle() { return nullptr; }
-  template<class T> static inline T* handle(T* type);
-  template<class T> static inline T* cast(Type* type);
-
-  static inline bool is_bitset(Type* type);
-  static inline bool is_class(Type* type);
-  static inline bool is_struct(Type* type, int tag);
-  static inline bool is_range(Type* type);
-
-  static inline Type::bitset as_bitset(Type* type);
-  static inline i::Handle<i::Map> as_class(Type* type);
-  static inline Struct* as_struct(Type* type);
-  static inline Range* as_range(Type* type);
-
-  static inline Type* from_bitset(Type::bitset);
-  static inline Type* from_bitset(Type::bitset, Zone* zone);
-  static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
-  static inline Type* from_struct(Struct* structured);
-  static inline Type* from_range(Range* range);
-
-  static inline Struct* struct_create(int tag, int length, Zone* zone);
-  static inline void struct_shrink(Struct* structure, int length);
-  static inline int struct_tag(Struct* structure);
-  static inline int struct_length(Struct* structure);
-  static inline Type* struct_get(Struct* structure, int i);
-  static inline void struct_set(Struct* structure, int i, Type* type);
-  template<class V>
-  static inline i::Handle<V> struct_get_value(Struct* structure, int i);
-  template<class V> static inline void struct_set_value(
-      Struct* structure, int i, i::Handle<V> x);
-
-  static inline Range* range_create(Zone* zone);
-  static inline int range_get_bitset(Range* range);
-  static inline void range_set_bitset(Range* range, int);
-  static inline double range_get_double(Range*, int index);
-  static inline void range_set_double(Range*, int index, double value, Zone*);
-};
-
-typedef TypeImpl<ZoneTypeConfig> Type;
-
-
-// -----------------------------------------------------------------------------
-// Heap-allocated types; either smis for bitsets, maps for classes, boxes for
-// constants, or fixed arrays for unions.
-
-struct HeapTypeConfig {
-  typedef TypeImpl<HeapTypeConfig> Type;
-  typedef i::Object Base;
-  typedef i::FixedArray Struct;
-  typedef i::FixedArray Range;
-  typedef i::Isolate Region;
-  template<class T> struct Handle { typedef i::Handle<T> type; };
-
-  static const int kRangeStructTag = 0xffff;
-
-  template<class T> static inline i::Handle<T> null_handle() {
-    return i::Handle<T>();
+  Iterator<i::Map> Classes() {
+    if (this->IsBitset()) return Iterator<i::Map>();
+    return Iterator<i::Map>(this);
   }
-  template<class T> static inline i::Handle<T> handle(T* type);
-  template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
+  Iterator<i::Object> Constants() {
+    if (this->IsBitset()) return Iterator<i::Object>();
+    return Iterator<i::Object>(this);
+  }
 
-  static inline bool is_bitset(Type* type);
-  static inline bool is_class(Type* type);
-  static inline bool is_struct(Type* type, int tag);
-  static inline bool is_range(Type* type);
+  // Printing.
 
-  static inline Type::bitset as_bitset(Type* type);
-  static inline i::Handle<i::Map> as_class(Type* type);
-  static inline i::Handle<Struct> as_struct(Type* type);
-  static inline i::Handle<Range> as_range(Type* type);
+  enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
 
-  static inline Type* from_bitset(Type::bitset);
-  static inline i::Handle<Type> from_bitset(Type::bitset, Isolate* isolate);
-  static inline i::Handle<Type> from_class(
-      i::Handle<i::Map> map, Isolate* isolate);
-  static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
-  static inline i::Handle<Type> from_range(i::Handle<Range> range);
+  void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS);  // NOLINT
 
-  static inline i::Handle<Struct> struct_create(
-      int tag, int length, Isolate* isolate);
-  static inline void struct_shrink(i::Handle<Struct> structure, int length);
-  static inline int struct_tag(i::Handle<Struct> structure);
-  static inline int struct_length(i::Handle<Struct> structure);
-  static inline i::Handle<Type> struct_get(i::Handle<Struct> structure, int i);
-  static inline void struct_set(
-      i::Handle<Struct> structure, int i, i::Handle<Type> type);
-  template<class V>
-  static inline i::Handle<V> struct_get_value(
-      i::Handle<Struct> structure, int i);
-  template<class V>
-  static inline void struct_set_value(
-      i::Handle<Struct> structure, int i, i::Handle<V> x);
+#ifdef DEBUG
+  void Print();
+#endif
 
-  static inline i::Handle<Range> range_create(Isolate* isolate);
-  static inline int range_get_bitset(i::Handle<Range> range);
-  static inline void range_set_bitset(i::Handle<Range> range, int value);
-  static inline double range_get_double(i::Handle<Range> range, int index);
-  static inline void range_set_double(i::Handle<Range> range, int index,
-                                      double value, Isolate* isolate);
+  // Helpers for testing.
+  bool IsBitsetForTesting() { return IsBitset(); }
+  bool IsUnionForTesting() { return IsUnion(); }
+  bitset AsBitsetForTesting() { return AsBitset(); }
+  UnionType* AsUnionForTesting() { return AsUnion(); }
+
+ private:
+  // Friends.
+  template <class>
+  friend class Iterator;
+  friend BitsetType;
+  friend UnionType;
+
+  // Internal inspection.
+  bool IsKind(TypeBase::Kind kind) { return TypeBase::IsKind(this, kind); }
+
+  bool IsNone() { return this == None(); }
+  bool IsAny() { return this == Any(); }
+  bool IsBitset() { return BitsetType::IsBitset(this); }
+  bool IsUnion() { return IsKind(TypeBase::kUnion); }
+
+  bitset AsBitset() {
+    DCHECK(this->IsBitset());
+    return reinterpret_cast<BitsetType*>(this)->Bitset();
+  }
+  UnionType* AsUnion() { return UnionType::cast(this); }
+
+  bitset Representation();
+
+  // Auxiliary functions.
+  bool SemanticMaybe(Type* that);
+
+  bitset BitsetGlb() { return BitsetType::Glb(this); }
+  bitset BitsetLub() { return BitsetType::Lub(this); }
+
+  bool SlowIs(Type* that);
+  bool SemanticIs(Type* that);
+
+  static bool Overlap(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* range, ConstantType* constant);
+  static bool Contains(RangeType* range, i::Object* val);
+
+  static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
+
+  static RangeType::Limits IntersectRangeAndBitset(Type* range, Type* bits,
+                                                   Zone* zone);
+  static RangeType::Limits ToLimits(bitset bits, Zone* zone);
+
+  bool SimplyEquals(Type* that);
+
+  static int AddToUnion(Type* type, UnionType* result, int size, Zone* zone);
+  static int IntersectAux(Type* type, Type* other, UnionType* result, int size,
+                          RangeType::Limits* limits, Zone* zone);
+  static Type* NormalizeUnion(Type* unioned, int size, Zone* zone);
+  static Type* NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone);
 };
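+// Example usage (an illustrative sketch, assuming a live Zone* zone):
+//
+//   Type* a = Type::Range(1, 10, zone);
+//   Type* b = Type::Range(5, 20, zone);
+//   Type* u = Type::Union(a, b, zone);      // subsumes both ranges
+//   Type* i = Type::Intersect(a, b, zone);  // the overlap, range [5, 10]
+//   CHECK(i->Is(a) && i->Is(b) && a->Is(u) && b->Is(u));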
 
-typedef TypeImpl<HeapTypeConfig> HeapType;
-
-
 // -----------------------------------------------------------------------------
 // Type bounds. A simple struct to represent a pair of lower/upper types.
 
-template<class Config>
-struct BoundsImpl {
-  typedef TypeImpl<Config> Type;
-  typedef typename Type::TypeHandle TypeHandle;
-  typedef typename Type::Region Region;
+struct Bounds {
+  Type* lower;
+  Type* upper;
 
-  TypeHandle lower;
-  TypeHandle upper;
-
-  BoundsImpl() :  // Make sure accessing uninitialized bounds crashes big-time.
-    lower(Config::template null_handle<Type>()),
-    upper(Config::template null_handle<Type>()) {}
-  explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {}
-  BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) {
-    DCHECK(lower->Is(upper));
-  }
+  Bounds()
+      :  // Make sure accessing uninitialized bounds crashes big-time.
+        lower(nullptr),
+        upper(nullptr) {}
+  explicit Bounds(Type* t) : lower(t), upper(t) {}
+  Bounds(Type* l, Type* u) : lower(l), upper(u) { DCHECK(lower->Is(upper)); }
 
   // Unrestricted bounds.
-  static BoundsImpl Unbounded() {
-    return BoundsImpl(Type::None(), Type::Any());
-  }
+  static Bounds Unbounded() { return Bounds(Type::None(), Type::Any()); }
 
   // Meet: both b1 and b2 are known to hold.
-  static BoundsImpl Both(BoundsImpl b1, BoundsImpl b2, Region* region) {
-    TypeHandle lower = Type::Union(b1.lower, b2.lower, region);
-    TypeHandle upper = Type::Intersect(b1.upper, b2.upper, region);
+  static Bounds Both(Bounds b1, Bounds b2, Zone* zone) {
+    Type* lower = Type::Union(b1.lower, b2.lower, zone);
+    Type* upper = Type::Intersect(b1.upper, b2.upper, zone);
     // Lower bounds are considered approximate, correct as necessary.
     if (!lower->Is(upper)) lower = upper;
-    return BoundsImpl(lower, upper);
+    return Bounds(lower, upper);
   }
 
   // Join: either b1 or b2 is known to hold.
-  static BoundsImpl Either(BoundsImpl b1, BoundsImpl b2, Region* region) {
-    TypeHandle lower = Type::Intersect(b1.lower, b2.lower, region);
-    TypeHandle upper = Type::Union(b1.upper, b2.upper, region);
-    return BoundsImpl(lower, upper);
+  static Bounds Either(Bounds b1, Bounds b2, Zone* zone) {
+    Type* lower = Type::Intersect(b1.lower, b2.lower, zone);
+    Type* upper = Type::Union(b1.upper, b2.upper, zone);
+    return Bounds(lower, upper);
   }
 
-  static BoundsImpl NarrowLower(BoundsImpl b, TypeHandle t, Region* region) {
-    TypeHandle lower = Type::Union(b.lower, t, region);
+  static Bounds NarrowLower(Bounds b, Type* t, Zone* zone) {
+    Type* lower = Type::Union(b.lower, t, zone);
     // Lower bounds are considered approximate, correct as necessary.
     if (!lower->Is(b.upper)) lower = b.upper;
-    return BoundsImpl(lower, b.upper);
+    return Bounds(lower, b.upper);
   }
-  static BoundsImpl NarrowUpper(BoundsImpl b, TypeHandle t, Region* region) {
-    TypeHandle lower = b.lower;
-    TypeHandle upper = Type::Intersect(b.upper, t, region);
+  static Bounds NarrowUpper(Bounds b, Type* t, Zone* zone) {
+    Type* lower = b.lower;
+    Type* upper = Type::Intersect(b.upper, t, zone);
     // Lower bounds are considered approximate, correct as necessary.
     if (!lower->Is(upper)) lower = upper;
-    return BoundsImpl(lower, upper);
+    return Bounds(lower, upper);
   }
 
-  bool Narrows(BoundsImpl that) {
+  bool Narrows(Bounds that) {
     return that.lower->Is(this->lower) && this->upper->Is(that.upper);
   }
 };
 
-typedef BoundsImpl<ZoneTypeConfig> Bounds;
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/typing-asm.cc b/src/typing-asm.cc
index 509ba7b..ddb608f 100644
--- a/src/typing-asm.cc
+++ b/src/typing-asm.cc
@@ -36,7 +36,6 @@
     if (!valid_) return;            \
   } while (false)
 
-
 AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
                    FunctionLiteral* root)
     : zone_(zone),
@@ -62,6 +61,7 @@
                            ZoneAllocationPolicy(zone)),
       in_function_(false),
       building_function_tables_(false),
+      visiting_exports_(false),
       cache_(TypeCache::Get()) {
   InitializeAstVisitor(isolate);
   InitializeStdlib();
@@ -89,7 +89,7 @@
   for (int i = 0; i < scope->num_parameters(); ++i) {
     Variable* param = scope->parameter(i);
     DCHECK(GetType(param) == NULL);
-    SetType(param, Type::None(zone()));
+    SetType(param, Type::None());
   }
 
   ZoneList<Declaration*>* decls = scope->declarations();
@@ -126,8 +126,7 @@
   for (int i = 0; i < decls->length(); ++i) {
     FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
     if (decl != NULL) {
-      RECURSE(
-          VisitWithExpectation(decl->fun(), Type::Any(zone()), "UNREACHABLE"));
+      RECURSE(VisitWithExpectation(decl->fun(), Type::Any(), "UNREACHABLE"));
       if (!computed_type_->IsFunction()) {
         FAIL(decl->fun(), "function literal expected to be a function");
       }
@@ -135,6 +134,7 @@
   }
 
   // Validate exports.
+  visiting_exports_ = true;
   ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
   if (stmt == nullptr) {
     FAIL(fun->body()->last(), "last statement in module is not a return");
@@ -148,7 +148,7 @@
   Variable* var = decl->proxy()->var();
   if (var->location() != VariableLocation::PARAMETER) {
     if (GetType(var) == NULL) {
-      SetType(var, Type::Any(zone()));
+      SetType(var, Type::Any());
     } else {
       DCHECK(!GetType(var)->IsFunction());
     }
@@ -165,14 +165,14 @@
   // Set function type so global references to functions have some type
   // (so they can give a more useful error).
   Variable* var = decl->proxy()->var();
-  SetType(var, Type::Function(zone()));
+  SetType(var, Type::Function());
 }
 
 
 void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
   // Extract result type.
   ZoneList<Statement*>* body = fun->body();
-  Type* result_type = Type::Undefined(zone());
+  Type* result_type = Type::Undefined();
   if (body->length() > 0) {
     ReturnStatement* stmt = body->last()->AsReturnStatement();
     if (stmt != NULL) {
@@ -188,9 +188,8 @@
       result_type = computed_type_;
     }
   }
-  Type::FunctionType* type =
-      Type::Function(result_type, Type::Any(), fun->parameter_count(), zone())
-          ->AsFunction();
+  Type* type =
+      Type::Function(result_type, Type::Any(), fun->parameter_count(), zone());
 
   // Extract parameter types.
   bool good = true;
@@ -212,7 +211,7 @@
       property_info_ = NULL;
     }
     SetType(var, computed_type_);
-    type->InitParameter(i, computed_type_);
+    type->AsFunction()->InitParameter(i, computed_type_);
     good = true;
   }
   if (!good) FAIL(fun, "missing parameter type annotations");
@@ -489,19 +488,19 @@
 
 
 void AsmTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Scope* scope = expr->scope();
-  DCHECK(scope->is_function_scope());
   if (in_function_) {
     FAIL(expr, "invalid nested function");
   }
+  Scope* scope = expr->scope();
+  DCHECK(scope->is_function_scope());
 
   if (!expr->bounds().upper->IsFunction()) {
     FAIL(expr, "invalid function literal");
   }
 
-  Type::FunctionType* type = expr->bounds().upper->AsFunction();
+  Type* type = expr->bounds().upper;
   Type* save_return_type = return_type_;
-  return_type_ = type->Result();
+  return_type_ = type->AsFunction()->Result();
   in_function_ = true;
   local_variable_type_.Clear();
   RECURSE(VisitDeclarations(scope->declarations()));
@@ -523,6 +522,9 @@
 
 
 void AsmTyper::VisitConditional(Conditional* expr) {
+  if (!in_function_) {
+    FAIL(expr, "ternary operator inside module body");
+  }
   RECURSE(VisitWithExpectation(expr->condition(), Type::Number(),
                                "condition expected to be integer"));
   if (!computed_type_->Is(cache_.kAsmInt)) {
@@ -554,11 +556,21 @@
 
 
 void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
+  VisitVariableProxy(expr, false);
+}
+
+void AsmTyper::VisitVariableProxy(VariableProxy* expr, bool assignment) {
   Variable* var = expr->var();
   VariableInfo* info = GetVariableInfo(var, false);
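+  // In the module body, only reads of the first three parameters, i.e. the
+  // asm.js stdlib, foreign and heap arguments, are legal variable uses.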
+  if (!assignment && !in_function_ && !building_function_tables_ &&
+      !visiting_exports_) {
+    if (var->location() != VariableLocation::PARAMETER || var->index() >= 3) {
+      FAIL(expr, "illegal variable reference in module body");
+    }
+  }
   if (info == NULL || info->type == NULL) {
     if (var->mode() == TEMPORARY) {
-      SetType(var, Type::Any(zone()));
+      SetType(var, Type::Any());
       info = GetVariableInfo(var, false);
     } else {
       FAIL(expr, "unbound variable");
@@ -623,13 +635,13 @@
   ZoneList<ObjectLiteralProperty*>* props = expr->properties();
   for (int i = 0; i < props->length(); ++i) {
     ObjectLiteralProperty* prop = props->at(i);
-    RECURSE(VisitWithExpectation(prop->value(), Type::Any(zone()),
+    RECURSE(VisitWithExpectation(prop->value(), Type::Any(),
                                  "object property expected to be a function"));
     if (!computed_type_->IsFunction()) {
       FAIL(prop->value(), "non-function in function table");
     }
   }
-  IntersectResult(expr, Type::Object(zone()));
+  IntersectResult(expr, Type::Object());
 }
 
 
@@ -639,7 +651,7 @@
   }
   // Allowed for function tables.
   ZoneList<Expression*>* values = expr->values();
-  Type* elem_type = Type::None(zone());
+  Type* elem_type = Type::None();
   for (int i = 0; i < values->length(); ++i) {
     Expression* value = values->at(i);
     RECURSE(VisitWithExpectation(value, Type::Any(), "UNREACHABLE"));
@@ -671,19 +683,23 @@
   RECURSE(VisitWithExpectation(
       expr->value(), type, "assignment value expected to match surrounding"));
   Type* target_type = StorageType(computed_type_);
-  if (intish_ != 0) {
-    FAIL(expr, "intish or floatish assignment");
-  }
   if (expr->target()->IsVariableProxy()) {
-    RECURSE(VisitWithExpectation(expr->target(), target_type,
-                                 "assignment target expected to match value"));
+    if (intish_ != 0) {
+      FAIL(expr, "intish or floatish assignment");
+    }
+    expected_type_ = target_type;
+    VisitVariableProxy(expr->target()->AsVariableProxy(), true);
   } else if (expr->target()->IsProperty()) {
+    int value_intish = intish_;
     Property* property = expr->target()->AsProperty();
-    RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
-                                 "bad propety object"));
+    RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
+                                 "bad property object"));
     if (!computed_type_->IsArray()) {
       FAIL(property->obj(), "array expected");
     }
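+    // value_intish snapshots intish_ from typing the assigned value above,
+    // because visiting the target property below may clobber intish_;
+    // floatish values may go into float32 heap views but not float64 ones.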
+    if (value_intish != 0 && computed_type_->Is(cache_.kFloat64Array)) {
+      FAIL(expr, "floatish assignment to double array");
+    }
     VisitHeapAccess(property, true, target_type);
   }
   IntersectResult(expr, target_type);
@@ -720,29 +736,32 @@
 
 void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
                                Type* assignment_type) {
-  Type::ArrayType* array_type = computed_type_->AsArray();
-  size_t size = array_size_;
-  Type* type = array_type->AsArray()->Element();
+  ArrayType* array_type = computed_type_->AsArray();
+  // size_t size = array_size_;  // used by the disabled call-mask check below
+  Type* type = array_type->Element();
   if (type->IsFunction()) {
     if (assigning) {
       FAIL(expr, "assigning to function table is illegal");
     }
-    BinaryOperation* bin = expr->key()->AsBinaryOperation();
-    if (bin == NULL || bin->op() != Token::BIT_AND) {
-      FAIL(expr->key(), "expected & in call");
-    }
-    RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
-                                 "array index expected to be integer"));
-    Literal* right = bin->right()->AsLiteral();
-    if (right == NULL || right->raw_value()->ContainsDot()) {
-      FAIL(right, "call mask must be integer");
-    }
-    RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
-                                 "call mask expected to be integer"));
-    if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
-      FAIL(right, "call mask must match function table");
-    }
-    bin->set_bounds(Bounds(cache_.kAsmSigned));
+    // TODO(bradnelson): Fix the parser and then un-comment this part
+    // BinaryOperation* bin = expr->key()->AsBinaryOperation();
+    // if (bin == NULL || bin->op() != Token::BIT_AND) {
+    //   FAIL(expr->key(), "expected & in call");
+    // }
+    // RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
+    //                              "array index expected to be integer"));
+    // Literal* right = bin->right()->AsLiteral();
+    // if (right == NULL || right->raw_value()->ContainsDot()) {
+    //   FAIL(right, "call mask must be integer");
+    // }
+    // RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
+    //                              "call mask expected to be integer"));
+    // if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
+    //   FAIL(right, "call mask must match function table");
+    // }
+    // bin->set_bounds(Bounds(cache_.kAsmSigned));
+    RECURSE(VisitWithExpectation(expr->key(), cache_.kAsmSigned,
+                                 "must be integer"));
     IntersectResult(expr, type);
   } else {
     Literal* literal = expr->key()->AsLiteral();
@@ -750,24 +769,28 @@
       RECURSE(VisitWithExpectation(literal, cache_.kAsmSigned,
                                    "array index expected to be integer"));
     } else {
-      BinaryOperation* bin = expr->key()->AsBinaryOperation();
-      if (bin == NULL || bin->op() != Token::SAR) {
-        FAIL(expr->key(), "expected >> in heap access");
-      }
-      RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
-                                   "array index expected to be integer"));
-      Literal* right = bin->right()->AsLiteral();
-      if (right == NULL || right->raw_value()->ContainsDot()) {
-        FAIL(right, "heap access shift must be integer");
-      }
-      RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
-                                   "array shift expected to be integer"));
-      int n = static_cast<int>(right->raw_value()->AsNumber());
       int expected_shift = ElementShiftSize(type);
-      if (expected_shift < 0 || n != expected_shift) {
-        FAIL(right, "heap access shift must match element size");
+      if (expected_shift == 0) {
+        RECURSE(Visit(expr->key()));
+      } else {
+        BinaryOperation* bin = expr->key()->AsBinaryOperation();
+        if (bin == NULL || bin->op() != Token::SAR) {
+          FAIL(expr->key(), "expected >> in heap access");
+        }
+        RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
+                                     "array index expected to be integer"));
+        Literal* right = bin->right()->AsLiteral();
+        if (right == NULL || right->raw_value()->ContainsDot()) {
+          FAIL(right, "heap access shift must be integer");
+        }
+        RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
+                                     "array shift expected to be integer"));
+        int n = static_cast<int>(right->raw_value()->AsNumber());
+        if (expected_shift < 0 || n != expected_shift) {
+          FAIL(right, "heap access shift must match element size");
+        }
       }
-      bin->set_bounds(Bounds(cache_.kAsmSigned));
+      expr->key()->set_bounds(Bounds(cache_.kAsmSigned));
     }
     Type* result_type;
     if (type->Is(cache_.kAsmIntArrayElement)) {
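
Context for the new expected_shift == 0 branch above: asm.js heap accesses are written HEAPn[expr >> k], where the literal shift k must equal log2 of the element size, so byte arrays (k == 0) may now use any integer key with no shift at all. A small stand-in for the size-to-shift mapping that ElementShiftSize provides:

#include <cassert>

// Stand-in for ElementShiftSize: log2 of the element width in bytes,
// or -1 for unsupported sizes.
int ElementShiftSize(int byte_size) {
  switch (byte_size) {
    case 1: return 0;  // HEAP8/HEAPU8: no shift required anymore
    case 2: return 1;  // HEAP16[i >> 1]
    case 4: return 2;  // HEAP32[i >> 2], HEAPF32[i >> 2]
    case 8: return 3;  // HEAPF64[i >> 3]
    default: return -1;
  }
}

int main() {
  assert(ElementShiftSize(1) == 0);  // the "recurse on any int key" case
  assert(ElementShiftSize(8) == 3);  // literal shift must be exactly 3
  return 0;
}
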
@@ -885,7 +908,8 @@
 
   // Only recurse at this point so that we avoid needing
   // stdlib.Math to have a real type.
-  RECURSE(VisitWithExpectation(expr->obj(), Type::Any(), "bad propety object"));
+  RECURSE(
+      VisitWithExpectation(expr->obj(), Type::Any(), "bad property object"));
 
   // For heap view or function table access.
   if (computed_type_->IsArray()) {
@@ -893,13 +917,16 @@
     return;
   }
 
-  // stdlib.x or foreign.x
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy != NULL) {
     Variable* var = proxy->var();
     if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
-      // foreign.x is ok.
-      SetResult(expr, expected_type_);
+      // foreign.x - functions are represented as () -> Any
+      if (Type::Any()->Is(expected_type_)) {
+        SetResult(expr, Type::Function(Type::Any(), zone()));
+      } else {
+        SetResult(expr, expected_type_);
+      }
       return;
     }
   }
@@ -909,6 +936,7 @@
 
 
 void AsmTyper::VisitCall(Call* expr) {
+  Type* expected_type = expected_type_;
   RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
                                "callee expected to be any"));
   StandardMember standard_member = kNone;
@@ -923,57 +951,69 @@
     FAIL(expr, "calls must be to bound variables or function tables");
   }
   if (computed_type_->IsFunction()) {
-    Type::FunctionType* fun_type = computed_type_->AsFunction();
+    FunctionType* fun_type = computed_type_->AsFunction();
     Type* result_type = fun_type->Result();
     ZoneList<Expression*>* args = expr->arguments();
-    if (fun_type->Arity() != args->length()) {
-      FAIL(expr, "call with wrong arity");
-    }
-    for (int i = 0; i < args->length(); ++i) {
-      Expression* arg = args->at(i);
-      RECURSE(VisitWithExpectation(
-          arg, fun_type->Parameter(i),
-          "call argument expected to match callee parameter"));
-      if (standard_member != kNone && standard_member != kMathFround &&
-          i == 0) {
-        result_type = computed_type_;
-      }
-    }
-    // Handle polymorphic stdlib functions specially.
-    if (standard_member == kMathCeil || standard_member == kMathFloor ||
-        standard_member == kMathSqrt) {
-      if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
-          !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
-        FAIL(expr, "illegal function argument type");
-      }
-    } else if (standard_member == kMathAbs || standard_member == kMathMin ||
-               standard_member == kMathMax) {
-      if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
-          !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
-          !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
-        FAIL(expr, "illegal function argument type");
-      }
-      if (args->length() > 1) {
-        Type* other = Type::Intersect(args->at(0)->bounds().upper,
-                                      args->at(1)->bounds().upper, zone());
-        if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
-            !other->Is(cache_.kAsmSigned)) {
-          FAIL(expr, "function arguments types don't match");
+    if (Type::Any()->Is(result_type)) {
+      // For foreign calls.
+      ZoneList<Expression*>* args = expr->arguments();
+      for (int i = 0; i < args->length(); ++i) {
+        Expression* arg = args->at(i);
+        RECURSE(VisitWithExpectation(
+            arg, Type::Any(), "foreign call argument expected to be any"));
+        // Checking for asm extern types explicitly, as the type system
+        // doesn't correctly check their inheritance relationship.
+        if (!computed_type_->Is(cache_.kAsmSigned) &&
+            !computed_type_->Is(cache_.kAsmFixnum) &&
+            !computed_type_->Is(cache_.kAsmDouble)) {
+          FAIL(arg,
+               "foreign call argument expected to be int, double, or fixnum");
         }
       }
+      intish_ = 0;
+      expr->expression()->set_bounds(
+          Bounds(Type::Function(Type::Any(), zone())));
+      IntersectResult(expr, expected_type);
+    } else {
+      if (fun_type->Arity() != args->length()) {
+        FAIL(expr, "call with wrong arity");
+      }
+      for (int i = 0; i < args->length(); ++i) {
+        Expression* arg = args->at(i);
+        RECURSE(VisitWithExpectation(
+            arg, fun_type->Parameter(i),
+            "call argument expected to match callee parameter"));
+        if (standard_member != kNone && standard_member != kMathFround &&
+            i == 0) {
+          result_type = computed_type_;
+        }
+      }
+      // Handle polymorphic stdlib functions specially.
+      if (standard_member == kMathCeil || standard_member == kMathFloor ||
+          standard_member == kMathSqrt) {
+        if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+            !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
+          FAIL(expr, "illegal function argument type");
+        }
+      } else if (standard_member == kMathAbs || standard_member == kMathMin ||
+                 standard_member == kMathMax) {
+        if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+            !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
+            !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
+          FAIL(expr, "illegal function argument type");
+        }
+        if (args->length() > 1) {
+          Type* other = Type::Intersect(args->at(0)->bounds().upper,
+                                        args->at(1)->bounds().upper, zone());
+          if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+              !other->Is(cache_.kAsmSigned)) {
+            FAIL(expr, "function arguments types don't match");
+          }
+        }
+      }
+      intish_ = 0;
+      IntersectResult(expr, result_type);
     }
-    intish_ = 0;
-    IntersectResult(expr, result_type);
-  } else if (computed_type_->Is(Type::Any())) {
-    // For foreign calls.
-    ZoneList<Expression*>* args = expr->arguments();
-    for (int i = 0; i < args->length(); ++i) {
-      Expression* arg = args->at(i);
-      RECURSE(VisitWithExpectation(arg, Type::Any(),
-                                   "foreign call argument expected to be any"));
-    }
-    intish_ = kMaxUncombinedAdditiveSteps;
-    IntersectResult(expr, Type::Number());
   } else {
     FAIL(expr, "invalid callee");
   }
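
The rewritten VisitCall treats a callee whose result type admits Any as a foreign (FFI) call and requires every argument to already be an asm.js extern type (signed, fixnum, or double). A hedged stand-in for the per-argument check, modeling the relevant TypeCache entries with an enum:

#include <cassert>

enum class AsmType { kSigned, kFixnum, kDouble, kFloat, kIntish };

// Only values representable across the FFI boundary may be passed to
// foreign functions; intermediates like intish must be coerced first.
bool IsValidForeignArg(AsmType t) {
  return t == AsmType::kSigned || t == AsmType::kFixnum ||
         t == AsmType::kDouble;
}

int main() {
  assert(IsValidForeignArg(AsmType::kSigned));
  assert(!IsValidForeignArg(AsmType::kIntish));  // needs |0 at the call site
  return 0;
}
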
@@ -987,7 +1027,7 @@
   RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
                                "expected stdlib function"));
   if (computed_type_->IsFunction()) {
-    Type::FunctionType* fun_type = computed_type_->AsFunction();
+    FunctionType* fun_type = computed_type_->AsFunction();
     ZoneList<Expression*>* args = expr->arguments();
     if (fun_type->Arity() != args->length())
       FAIL(expr, "call with wrong arity");
@@ -1011,6 +1051,9 @@
 
 
 void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
+  if (!in_function_) {
+    FAIL(expr, "unary operator inside module body");
+  }
   switch (expr->op()) {
     case Token::NOT:  // Used to encode != and !==
       RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
@@ -1079,6 +1122,27 @@
 
 
 void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
+  if (!in_function_) {
+    if (expr->op() != Token::BIT_OR && expr->op() != Token::MUL) {
+      FAIL(expr, "illegal binary operator inside module body");
+    }
+    if (!(expr->left()->IsProperty() || expr->left()->IsVariableProxy()) ||
+        !expr->right()->IsLiteral()) {
+      FAIL(expr, "illegal computation inside module body");
+    }
+    DCHECK(expr->right()->AsLiteral() != nullptr);
+    const AstValue* right_value = expr->right()->AsLiteral()->raw_value();
+    if (expr->op() == Token::BIT_OR) {
+      if (right_value->AsNumber() != 0.0 || right_value->ContainsDot()) {
+        FAIL(expr, "illegal integer annotation value");
+      }
+    }
+    if (expr->op() == Token::MUL) {
+      if (right_value->AsNumber() != 1.0 && right_value->ContainsDot()) {
+        FAIL(expr, "illegal double annotation value");
+      }
+    }
+  }
   switch (expr->op()) {
     case Token::COMMA: {
       RECURSE(VisitWithExpectation(expr->left(), Type::Any(),
@@ -1095,6 +1159,9 @@
       // BIT_OR allows Any since it is used as a type coercion.
       VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
                                   cache_.kAsmSigned, true);
+      if (expr->left()->IsCall() && expr->op() == Token::BIT_OR) {
+        expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
+      }
       return;
     }
     case Token::BIT_XOR: {
@@ -1181,6 +1248,9 @@
       } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
                  right_type->Is(cache_.kAsmDouble)) {
         // For unary +, expressed as x * 1.0
+        if (expr->left()->IsCall() && expr->op() == Token::MUL) {
+          expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
+        }
         IntersectResult(expr, cache_.kAsmDouble);
         return;
       } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
@@ -1204,6 +1274,9 @@
 
 
 void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
+  if (!in_function_) {
+    FAIL(expr, "comparison inside module body");
+  }
   Token::Value op = expr->op();
   if (op != Token::EQ && op != Token::NE && op != Token::LT &&
       op != Token::LTE && op != Token::GT && op != Token::GTE) {
@@ -1295,7 +1368,7 @@
   if (allow_simd_) {
     InitializeStdlibSIMD();
   }
-  Type* number_type = Type::Number(zone());
+  Type* number_type = Type::Number();
   Type* double_type = cache_.kAsmDouble;
   Type* double_fn1_type = Type::Function(double_type, double_type, zone());
   Type* double_fn2_type =
@@ -1352,7 +1425,7 @@
   stdlib_types_["Infinity"]->standard_member = kInfinity;
   stdlib_types_["NaN"] = new (zone()) VariableInfo(double_type);
   stdlib_types_["NaN"]->standard_member = kNaN;
-  Type* buffer_type = Type::Any(zone());
+  Type* buffer_type = Type::Any();
 #define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
   stdlib_types_[#TypeName "Array"] = new (zone()) VariableInfo(  \
       Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
@@ -1469,7 +1542,7 @@
   expected_type_ = expected_type;
   RECURSE(Visit(expr));
   Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
-  if (bounded_type->Is(Type::None(zone()))) {
+  if (bounded_type->Is(Type::None())) {
 #ifdef DEBUG
     PrintF("Computed type: ");
     computed_type_->Print();
@@ -1482,8 +1555,7 @@
 }
 
 
-void AsmTyper::VisitRewritableAssignmentExpression(
-    RewritableAssignmentExpression* expr) {
+void AsmTyper::VisitRewritableExpression(RewritableExpression* expr) {
   RECURSE(Visit(expr->expression()));
 }
 
diff --git a/src/typing-asm.h b/src/typing-asm.h
index b7f5383..54796ed 100644
--- a/src/typing-asm.h
+++ b/src/typing-asm.h
@@ -22,7 +22,7 @@
   explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
                     FunctionLiteral* root);
   bool Validate();
-  void set_allow_simd(bool simd);
+  void set_allow_simd(bool simd) { allow_simd_ = simd; }
   const char* error_message() { return error_message_; }
 
   enum StandardMember {
@@ -113,6 +113,7 @@
 
   bool in_function_;  // In module function?
   bool building_function_tables_;
+  bool visiting_exports_;
 
   TypeCache const& cache_;
 
@@ -161,6 +162,8 @@
 
   void VisitLiteral(Literal* expr, bool is_return);
 
+  void VisitVariableProxy(VariableProxy* expr, bool assignment);
+
   void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
                                    Type* right_expected, Type* result_type,
                                    bool conversion);
diff --git a/src/utils-inl.h b/src/utils-inl.h
new file mode 100644
index 0000000..617d7fc
--- /dev/null
+++ b/src/utils-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UTILS_INL_H_
+#define V8_UTILS_INL_H_
+
+#include "src/utils.h"
+
+#include "include/v8-platform.h"
+#include "src/base/platform/time.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TimedScope {
+ public:
+  explicit TimedScope(double* result)
+      : start_(TimestampMs()), result_(result) {}
+
+  ~TimedScope() { *result_ = TimestampMs() - start_; }
+
+ private:
+  static inline double TimestampMs() {
+    return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+           static_cast<double>(base::Time::kMillisecondsPerSecond);
+  }
+
+  double start_;
+  double* result_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_UTILS_INL_H_
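
The new TimedScope writes the elapsed milliseconds into *result from its destructor. A self-contained analogue, substituting std::chrono for V8::GetCurrentPlatform(), to show the intended usage pattern:

#include <chrono>
#include <cstdio>

class TimedScope {
 public:
  explicit TimedScope(double* result) : start_(NowMs()), result_(result) {}
  ~TimedScope() { *result_ = NowMs() - start_; }

 private:
  static double NowMs() {
    using namespace std::chrono;
    return duration<double, std::milli>(steady_clock::now().time_since_epoch())
        .count();
  }
  double start_;
  double* result_;
};

int main() {
  double elapsed_ms = 0;
  {
    TimedScope scope(&elapsed_ms);  // start timestamp taken here
    volatile long sum = 0;
    for (long i = 0; i < 1000000; i++) sum = sum + i;  // work being timed
  }  // destructor stores the elapsed time
  std::printf("took %.3f ms\n", elapsed_ms);
  return 0;
}
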
diff --git a/src/utils.h b/src/utils.h
index 1ea2d56..d779979 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1125,6 +1125,19 @@
   int id_;
 };
 
+class TokenDispenserForFinally {
+ public:
+  int GetBreakContinueToken() { return next_token_++; }
+  static const int kFallThroughToken = 0;
+  static const int kThrowToken = 1;
+  static const int kReturnToken = 2;
+
+  static const int kFirstBreakContinueToken = 3;
+  static const int kInvalidToken = -1;
+
+ private:
+  int next_token_ = kFirstBreakContinueToken;
+};
 
 // ----------------------------------------------------------------------------
 // I/O support.
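
TokenDispenserForFinally (added above) appears intended for try/finally lowering: each way control can leave a try body is assigned an integer token that is recorded before the finally clause runs and switched on afterwards to re-dispatch. Tokens 0 through 2 are reserved for fall-through, throw, and return; break/continue targets draw fresh tokens from 3 upward, as this quick check of the numbering shows:

#include <cassert>

class TokenDispenserForFinally {
 public:
  int GetBreakContinueToken() { return next_token_++; }
  static const int kFallThroughToken = 0;
  static const int kThrowToken = 1;
  static const int kReturnToken = 2;
  static const int kFirstBreakContinueToken = 3;

 private:
  int next_token_ = kFirstBreakContinueToken;
};

int main() {
  TokenDispenserForFinally dispenser;
  assert(dispenser.GetBreakContinueToken() == 3);  // first break/continue target
  assert(dispenser.GetBreakContinueToken() == 4);  // next jump target
  return 0;
}
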
@@ -1715,75 +1728,47 @@
   return limit;
 }
 
-static inline double ReadDoubleValue(const void* p) {
-#ifndef V8_TARGET_ARCH_MIPS
-  return *reinterpret_cast<const double*>(p);
+template <typename V>
+static inline V ReadUnalignedValue(const void* p) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+  return *reinterpret_cast<const V*>(p);
+#else
+  V r;
+  memmove(&r, p, sizeof(V));
+  return r;
+#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+template <typename V>
+static inline void WriteUnalignedValue(void* p, V value) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+  *(reinterpret_cast<V*>(p)) = value;
 #else   // V8_TARGET_ARCH_MIPS
-  // Prevent compiler from using load-double (mips ldc1) on (possibly)
-  // non-64-bit aligned address.
-  union conversion {
-    double d;
-    uint32_t u[2];
-  } c;
-  const uint32_t* ptr = reinterpret_cast<const uint32_t*>(p);
-  c.u[0] = *ptr;
-  c.u[1] = *(ptr + 1);
-  return c.d;
-#endif  // V8_TARGET_ARCH_MIPS
+  memmove(p, &value, sizeof(V));
+#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+static inline double ReadDoubleValue(const void* p) {
+  return ReadUnalignedValue<double>(p);
 }
 
 
 static inline void WriteDoubleValue(void* p, double value) {
-#ifndef V8_TARGET_ARCH_MIPS
-  *(reinterpret_cast<double*>(p)) = value;
-#else   // V8_TARGET_ARCH_MIPS
-  // Prevent compiler from using load-double (mips sdc1) on (possibly)
-  // non-64-bit aligned address.
-  union conversion {
-    double d;
-    uint32_t u[2];
-  } c;
-  c.d = value;
-  uint32_t* ptr = reinterpret_cast<uint32_t*>(p);
-  *ptr = c.u[0];
-  *(ptr + 1) = c.u[1];
-#endif  // V8_TARGET_ARCH_MIPS
+  WriteUnalignedValue(p, value);
 }
 
 
 static inline uint16_t ReadUnalignedUInt16(const void* p) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
-  return *reinterpret_cast<const uint16_t*>(p);
-#else   // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-  // Prevent compiler from using load-half (mips lh) on (possibly)
-  // non-16-bit aligned address.
-  union conversion {
-    uint16_t h;
-    uint8_t b[2];
-  } c;
-  const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
-  c.b[0] = *ptr;
-  c.b[1] = *(ptr + 1);
-  return c.h;
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+  return ReadUnalignedValue<uint16_t>(p);
 }
 
 
 static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
-  *(reinterpret_cast<uint16_t*>(p)) = value;
-#else   // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-  // Prevent compiler from using store-half (mips sh) on (possibly)
-  // non-16-bit aligned address.
-  union conversion {
-    uint16_t h;
-    uint8_t b[2];
-  } c;
-  c.h = value;
-  uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
-  *ptr = c.b[0];
-  *(ptr + 1) = c.b[1];
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+  WriteUnalignedValue(p, value);
+}
+
+static inline void WriteUnalignedUInt32(void* p, uint32_t value) {
+  WriteUnalignedValue(p, value);
 }
 
 }  // namespace internal
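
The ReadUnalignedValue/WriteUnalignedValue templates above replace the per-type union workarounds: on MIPS, where the compiler may emit load/store instructions that fault on unaligned addresses, a byte-wise memmove is used instead. A minimal sketch of the portable pattern (memcpy is equivalent here because source and destination never overlap):

#include <cstring>

template <typename V>
static inline V ReadUnalignedValue(const void* p) {
  V r;
  memcpy(&r, p, sizeof(V));  // byte-wise copy is legal at any alignment
  return r;
}

template <typename V>
static inline void WriteUnalignedValue(void* p, V value) {
  memcpy(p, &value, sizeof(V));
}

int main() {
  unsigned char buf[16] = {0};
  WriteUnalignedValue<double>(buf + 1, 3.5);  // deliberately misaligned
  return ReadUnalignedValue<double>(buf + 1) == 3.5 ? 0 : 1;
}
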
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index d60548d..6533aa1 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -8,6 +8,7 @@
 #include "src/vm-state.h"
 #include "src/log.h"
 #include "src/simulator.h"
+#include "src/tracing/trace-event.h"
 
 namespace v8 {
 namespace internal {
@@ -39,8 +40,11 @@
 template <StateTag Tag>
 VMState<Tag>::VMState(Isolate* isolate)
     : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
-  if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
+  if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+    if (FLAG_log_timer_events) {
+      LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
+    }
+    TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   }
   isolate_->set_current_vm_state(Tag);
 }
@@ -48,24 +52,34 @@
 
 template <StateTag Tag>
 VMState<Tag>::~VMState() {
-  if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
+  if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+    if (FLAG_log_timer_events) {
+      LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
+    }
+    TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
   }
   isolate_->set_current_vm_state(previous_tag_);
 }
 
-
 ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
     : isolate_(isolate),
       callback_(callback),
-      previous_scope_(isolate->external_callback_scope()) {
+      previous_scope_(isolate->external_callback_scope()),
+      timer_(&isolate->counters()->runtime_call_stats()->ExternalCallback,
+             isolate->counters()->runtime_call_stats()->current_timer()) {
 #ifdef USE_SIMULATOR
   scope_address_ = Simulator::current(isolate)->get_sp();
 #endif
   isolate_->set_external_callback_scope(this);
+  if (FLAG_runtime_call_stats) {
+    isolate_->counters()->runtime_call_stats()->Enter(&timer_);
+  }
 }
 
 ExternalCallbackScope::~ExternalCallbackScope() {
+  if (FLAG_runtime_call_stats) {
+    isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+  }
   isolate_->set_external_callback_scope(previous_scope_);
 }
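
VMState and ExternalCallbackScope now bracket external callbacks with paired events (TRACE_EVENT_BEGIN0/END0, RuntimeCallStats Enter/Leave) issued from the constructor and destructor, so the pair stays balanced on every exit path. A self-contained sketch of that RAII pairing:

#include <cstdio>

class TraceScope {
 public:
  explicit TraceScope(const char* name) : name_(name) {
    std::printf("BEGIN %s\n", name_);  // stands in for TRACE_EVENT_BEGIN0
  }
  ~TraceScope() {
    std::printf("END %s\n", name_);  // stands in for TRACE_EVENT_END0
  }

 private:
  const char* name_;
};

int RunExternalCallback(bool fail_early) {
  TraceScope scope("V8.External");
  if (fail_early) return -1;  // END is still emitted on this path
  return 0;
}

int main() {
  RunExternalCallback(true);
  RunExternalCallback(false);
  return 0;
}
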
 
diff --git a/src/vm-state.h b/src/vm-state.h
index 7e723a5..3f8d381 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -6,6 +6,7 @@
 #define V8_VM_STATE_H_
 
 #include "src/allocation.h"
+#include "src/counters.h"
 #include "src/isolate.h"
 
 namespace v8 {
@@ -48,6 +49,7 @@
   Isolate* isolate_;
   Address callback_;
   ExternalCallbackScope* previous_scope_;
+  RuntimeCallTimer timer_;
 #ifdef USE_SIMULATOR
   Address scope_address_;
 #endif
diff --git a/src/wasm/asm-wasm-builder.cc b/src/wasm/asm-wasm-builder.cc
index 30f8464..ee5427b 100644
--- a/src/wasm/asm-wasm-builder.cc
+++ b/src/wasm/asm-wasm-builder.cc
@@ -27,7 +27,8 @@
 
 class AsmWasmBuilderImpl : public AstVisitor {
  public:
-  AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal)
+  AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
+                     Handle<Object> foreign)
       : local_variables_(HashMap::PointersMatch,
                          ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
@@ -44,17 +45,23 @@
         literal_(literal),
         isolate_(isolate),
         zone_(zone),
+        foreign_(foreign),
         cache_(TypeCache::Get()),
         breakable_blocks_(zone),
         block_size_(0),
-        init_function_index(0) {
+        init_function_index_(0),
+        next_table_index_(0),
+        function_tables_(HashMap::PointersMatch,
+                         ZoneHashMap::kDefaultHashMapCapacity,
+                         ZoneAllocationPolicy(zone)),
+        imported_function_table_(this) {
     InitializeAstVisitor(isolate);
   }
 
   void InitializeInitFunction() {
     unsigned char init[] = "__init__";
-    init_function_index = builder_->AddFunction();
-    current_function_builder_ = builder_->FunctionAt(init_function_index);
+    init_function_index_ = builder_->AddFunction();
+    current_function_builder_ = builder_->FunctionAt(init_function_index_);
     current_function_builder_->SetName(init, 8);
     current_function_builder_->ReturnType(kAstStmt);
     current_function_builder_->Exported(1);
@@ -70,7 +77,7 @@
 
   void VisitFunctionDeclaration(FunctionDeclaration* decl) {
     DCHECK(!in_function_);
-    DCHECK(current_function_builder_ == nullptr);
+    DCHECK_NULL(current_function_builder_);
     uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
     current_function_builder_ = builder_->FunctionAt(index);
     in_function_ = true;
@@ -103,11 +110,15 @@
         }
       }
     }
-    DCHECK(in_function_);
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
-                         static_cast<byte>(stmt->statements()->length()));
-    RECURSE(VisitStatements(stmt->statements()));
-    DCHECK(block_size_ >= 0);
+    if (in_function_) {
+      BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+                           false,
+                           static_cast<byte>(stmt->statements()->length()));
+      RECURSE(VisitStatements(stmt->statements()));
+      DCHECK(block_size_ >= 0);
+    } else {
+      RECURSE(VisitStatements(stmt->statements()));
+    }
   }
 
   class BlockVisitor {
@@ -162,7 +173,7 @@
 
   void VisitContinueStatement(ContinueStatement* stmt) {
     DCHECK(in_function_);
-    DCHECK(stmt->target() != NULL);
+    DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
     int block_distance = 0;
     for (; i >= 0; i--) {
@@ -183,7 +194,7 @@
 
   void VisitBreakStatement(BreakStatement* stmt) {
     DCHECK(in_function_);
-    DCHECK(stmt->target() != NULL);
+    DCHECK_NOT_NULL(stmt->target());
     int i = static_cast<int>(breakable_blocks_.size()) - 1;
     int block_distance = 0;
     for (; i >= 0; i--) {
@@ -229,7 +240,7 @@
   void CompileCase(CaseClause* clause, uint16_t fall_through,
                    VariableProxy* tag) {
     Literal* label = clause->label()->AsLiteral();
-    DCHECK(label != nullptr);
+    DCHECK_NOT_NULL(label);
     block_size_++;
     current_function_builder_->Emit(kExprIf);
     current_function_builder_->Emit(kExprI32Ior);
@@ -247,7 +258,7 @@
 
   void VisitSwitchStatement(SwitchStatement* stmt) {
     VariableProxy* tag = stmt->tag()->AsVariableProxy();
-    DCHECK(tag != NULL);
+    DCHECK_NOT_NULL(tag);
     BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
                          0);
     uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
@@ -332,20 +343,20 @@
     Scope* scope = expr->scope();
     if (in_function_) {
       if (expr->bounds().lower->IsFunction()) {
-        Type::FunctionType* func_type = expr->bounds().lower->AsFunction();
+        FunctionType* func_type = expr->bounds().lower->AsFunction();
         LocalType return_type = TypeFrom(func_type->Result());
         current_function_builder_->ReturnType(return_type);
         for (int i = 0; i < expr->parameter_count(); i++) {
           LocalType type = TypeFrom(func_type->Parameter(i));
-          DCHECK(type != kAstStmt);
+          DCHECK_NE(kAstStmt, type);
           LookupOrInsertLocal(scope->parameter(i), type);
         }
       } else {
         UNREACHABLE();
       }
     }
-    RECURSE(VisitDeclarations(scope->declarations()));
     RECURSE(VisitStatements(expr->body()));
+    RECURSE(VisitDeclarations(scope->declarations()));
   }
 
   void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@@ -363,34 +374,26 @@
   void VisitVariableProxy(VariableProxy* expr) {
     if (in_function_) {
       Variable* var = expr->var();
-      if (var->is_function()) {
-        DCHECK(!is_set_op_);
-        std::vector<uint8_t> index =
-            UnsignedLEB128From(LookupOrInsertFunction(var));
-        current_function_builder_->EmitCode(
-            &index[0], static_cast<uint32_t>(index.size()));
-      } else {
-        if (is_set_op_) {
-          if (var->IsContextSlot()) {
-            current_function_builder_->Emit(kExprStoreGlobal);
-          } else {
-            current_function_builder_->Emit(kExprSetLocal);
-          }
-          is_set_op_ = false;
-        } else {
-          if (var->IsContextSlot()) {
-            current_function_builder_->Emit(kExprLoadGlobal);
-          } else {
-            current_function_builder_->Emit(kExprGetLocal);
-          }
-        }
-        LocalType var_type = TypeOf(expr);
-        DCHECK(var_type != kAstStmt);
+      if (is_set_op_) {
         if (var->IsContextSlot()) {
-          AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+          current_function_builder_->Emit(kExprStoreGlobal);
         } else {
-          AddLeb128(LookupOrInsertLocal(var, var_type), true);
+          current_function_builder_->Emit(kExprSetLocal);
         }
+        is_set_op_ = false;
+      } else {
+        if (var->IsContextSlot()) {
+          current_function_builder_->Emit(kExprLoadGlobal);
+        } else {
+          current_function_builder_->Emit(kExprGetLocal);
+        }
+      }
+      LocalType var_type = TypeOf(expr);
+      DCHECK_NE(kAstStmt, var_type);
+      if (var->IsContextSlot()) {
+        AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+      } else {
+        AddLeb128(LookupOrInsertLocal(var, var_type), true);
       }
     }
   }
@@ -433,10 +436,10 @@
       ObjectLiteralProperty* prop = props->at(i);
       DCHECK(marking_exported);
       VariableProxy* expr = prop->value()->AsVariableProxy();
-      DCHECK(expr != nullptr);
+      DCHECK_NOT_NULL(expr);
       Variable* var = expr->var();
       Literal* name = prop->key()->AsLiteral();
-      DCHECK(name != nullptr);
+      DCHECK_NOT_NULL(name);
       DCHECK(name->IsPropertyName());
       const AstRawString* raw_name = name->AsRawPropertyName();
       if (var->is_function()) {
@@ -451,7 +454,7 @@
   void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
 
   void LoadInitFunction() {
-    current_function_builder_ = builder_->FunctionAt(init_function_index);
+    current_function_builder_ = builder_->FunctionAt(init_function_index_);
     in_function_ = true;
   }
 
@@ -460,11 +463,155 @@
     current_function_builder_ = nullptr;
   }
 
+  void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
+    FunctionType* func_type =
+        funcs->bounds().lower->AsArray()->Element()->AsFunction();
+    LocalType return_type = TypeFrom(func_type->Result());
+    FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+                             func_type->Arity());
+    if (return_type != kAstStmt) {
+      sig.AddReturn(static_cast<LocalType>(return_type));
+    }
+    for (int i = 0; i < func_type->Arity(); i++) {
+      sig.AddParam(TypeFrom(func_type->Parameter(i)));
+    }
+    uint16_t signature_index = builder_->AddSignature(sig.Build());
+    InsertFunctionTable(table->var(), next_table_index_, signature_index);
+    next_table_index_ += funcs->values()->length();
+    for (int i = 0; i < funcs->values()->length(); i++) {
+      VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
+      DCHECK_NOT_NULL(func);
+      builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
+    }
+  }
+
+  struct FunctionTableIndices : public ZoneObject {
+    uint32_t start_index;
+    uint16_t signature_index;
+  };
+
+  void InsertFunctionTable(Variable* v, uint32_t start_index,
+                           uint16_t signature_index) {
+    FunctionTableIndices* container = new (zone()) FunctionTableIndices();
+    container->start_index = start_index;
+    container->signature_index = signature_index;
+    ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
+        v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
+    entry->value = container;
+  }
+
+  FunctionTableIndices* LookupFunctionTable(Variable* v) {
+    ZoneHashMap::Entry* entry =
+        function_tables_.Lookup(v, ComputePointerHash(v));
+    DCHECK_NOT_NULL(entry);
+    return reinterpret_cast<FunctionTableIndices*>(entry->value);
+  }
+
+  class ImportedFunctionTable {
+   private:
+    class ImportedFunctionIndices : public ZoneObject {
+     public:
+      const unsigned char* name_;
+      int name_length_;
+      WasmModuleBuilder::SignatureMap signature_to_index_;
+
+      ImportedFunctionIndices(const unsigned char* name, int name_length,
+                              Zone* zone)
+          : name_(name), name_length_(name_length), signature_to_index_(zone) {}
+    };
+    ZoneHashMap table_;
+    AsmWasmBuilderImpl* builder_;
+
+   public:
+    explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
+        : table_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+                 ZoneAllocationPolicy(builder->zone())),
+          builder_(builder) {}
+
+    void AddImport(Variable* v, const unsigned char* name, int name_length) {
+      ImportedFunctionIndices* indices = new (builder_->zone())
+          ImportedFunctionIndices(name, name_length, builder_->zone());
+      ZoneHashMap::Entry* entry = table_.LookupOrInsert(
+          v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
+      entry->value = indices;
+    }
+
+    uint16_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+      ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
+      DCHECK_NOT_NULL(entry);
+      ImportedFunctionIndices* indices =
+          reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+      WasmModuleBuilder::SignatureMap::iterator pos =
+          indices->signature_to_index_.find(sig);
+      if (pos != indices->signature_to_index_.end()) {
+        return pos->second;
+      } else {
+        uint16_t index = builder_->builder_->AddFunction();
+        indices->signature_to_index_[sig] = index;
+        WasmFunctionBuilder* function = builder_->builder_->FunctionAt(index);
+        function->External(1);
+        function->SetName(indices->name_, indices->name_length_);
+        if (sig->return_count() > 0) {
+          function->ReturnType(sig->GetReturn());
+        }
+        for (size_t i = 0; i < sig->parameter_count(); i++) {
+          function->AddParam(sig->GetParam(i));
+        }
+        return index;
+      }
+    }
+  };
+
   void VisitAssignment(Assignment* expr) {
     bool in_init = false;
     if (!in_function_) {
+      BinaryOperation* binop = expr->value()->AsBinaryOperation();
+      if (binop != nullptr) {
+        Property* prop = binop->left()->AsProperty();
+        DCHECK_NOT_NULL(prop);
+        LoadInitFunction();
+        is_set_op_ = true;
+        RECURSE(Visit(expr->target()));
+        DCHECK(!is_set_op_);
+        if (binop->op() == Token::MUL) {
+          DCHECK(binop->right()->IsLiteral());
+          DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
+          DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
+          VisitForeignVariable(true, prop);
+        } else if (binop->op() == Token::BIT_OR) {
+          DCHECK(binop->right()->IsLiteral());
+          DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
+          DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
+          VisitForeignVariable(false, prop);
+        } else {
+          UNREACHABLE();
+        }
+        UnLoadInitFunction();
+        return;
+      }
       // TODO(bradnelson): Get rid of this.
       if (TypeOf(expr->value()) == kAstStmt) {
+        Property* prop = expr->value()->AsProperty();
+        if (prop != nullptr) {
+          VariableProxy* vp = prop->obj()->AsVariableProxy();
+          if (vp != nullptr && vp->var()->IsParameter() &&
+              vp->var()->index() == 1) {
+            VariableProxy* target = expr->target()->AsVariableProxy();
+            if (target->bounds().lower->Is(Type::Function())) {
+              const AstRawString* name =
+                  prop->key()->AsLiteral()->AsRawPropertyName();
+              imported_function_table_.AddImport(
+                  target->var(), name->raw_data(), name->length());
+            }
+          }
+        }
+        ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
+        if (funcs != nullptr &&
+            funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+          VariableProxy* target = expr->target()->AsVariableProxy();
+          DCHECK_NOT_NULL(target);
+          AddFunctionTable(target, funcs);
+        }
         return;
       }
       in_init = true;
@@ -493,10 +640,59 @@
 
   void VisitThrow(Throw* expr) { UNREACHABLE(); }
 
+  void VisitForeignVariable(bool is_float, Property* expr) {
+    DCHECK(expr->obj()->AsVariableProxy());
+    DCHECK(VariableLocation::PARAMETER ==
+           expr->obj()->AsVariableProxy()->var()->location());
+    DCHECK_EQ(1, expr->obj()->AsVariableProxy()->var()->index());
+    Literal* key_literal = expr->key()->AsLiteral();
+    DCHECK_NOT_NULL(key_literal);
+    if (!key_literal->value().is_null() && !foreign_.is_null() &&
+        foreign_->IsObject()) {
+      Handle<Name> name =
+          i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
+      MaybeHandle<Object> maybe_value = i::Object::GetProperty(foreign_, name);
+      if (!maybe_value.is_null()) {
+        Handle<Object> value = maybe_value.ToHandleChecked();
+        if (is_float) {
+          MaybeHandle<Object> maybe_nvalue = i::Object::ToNumber(value);
+          if (!maybe_nvalue.is_null()) {
+            Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
+            if (nvalue->IsNumber()) {
+              double val = nvalue->Number();
+              byte code[] = {WASM_F64(val)};
+              current_function_builder_->EmitCode(code, sizeof(code));
+              return;
+            }
+          }
+        } else {
+          MaybeHandle<Object> maybe_nvalue =
+              i::Object::ToInt32(isolate_, value);
+          if (!maybe_nvalue.is_null()) {
+            Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
+            if (nvalue->IsNumber()) {
+              int32_t val = static_cast<int32_t>(nvalue->Number());
+              byte code[] = {WASM_I32(val)};
+              current_function_builder_->EmitCode(code, sizeof(code));
+              return;
+            }
+          }
+        }
+      }
+    }
+    if (is_float) {
+      byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
+      current_function_builder_->EmitCode(code, sizeof(code));
+    } else {
+      byte code[] = {WASM_I32(0)};
+      current_function_builder_->EmitCode(code, sizeof(code));
+    }
+  }
+
   void VisitProperty(Property* expr) {
     Expression* obj = expr->obj();
-    DCHECK(obj->bounds().lower == obj->bounds().upper);
-    TypeImpl<ZoneTypeConfig>* type = obj->bounds().lower;
+    DCHECK_EQ(obj->bounds().lower, obj->bounds().upper);
+    Type* type = obj->bounds().lower;
     MachineType mtype;
     int size;
     if (type->Is(cache_.kUint8Array)) {
@@ -533,29 +729,38 @@
         WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
         WasmOpcodes::LoadStoreAccessOf(false));
     is_set_op_ = false;
-    Literal* value = expr->key()->AsLiteral();
-    if (value) {
-      DCHECK(value->raw_value()->IsNumber());
-      DCHECK(kAstI32 == TypeOf(value));
-      int val = static_cast<int>(value->raw_value()->AsNumber());
-      byte code[] = {WASM_I32(val * size)};
-      current_function_builder_->EmitCode(code, sizeof(code));
+    if (size == 1) {
+      // Allow more general expressions in byte arrays than the spec
+      // strictly permits.
+      // Early versions of Emscripten emit HEAP8[HEAP32[..]|0] in
+      // places that strictly should be HEAP8[HEAP32[..]>>0].
+      RECURSE(Visit(expr->key()));
       return;
-    }
-    BinaryOperation* binop = expr->key()->AsBinaryOperation();
-    if (binop) {
-      DCHECK(Token::SAR == binop->op());
-      DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
-      DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
-      DCHECK(size ==
-             1 << static_cast<int>(
-                 binop->right()->AsLiteral()->raw_value()->AsNumber()));
-      // Mask bottom bits to match asm.js behavior.
-      current_function_builder_->Emit(kExprI32And);
-      byte code[] = {WASM_I8(~(size - 1))};
-      current_function_builder_->EmitCode(code, sizeof(code));
-      RECURSE(Visit(binop->left()));
-      return;
+    } else {
+      Literal* value = expr->key()->AsLiteral();
+      if (value) {
+        DCHECK(value->raw_value()->IsNumber());
+        DCHECK_EQ(kAstI32, TypeOf(value));
+        int val = static_cast<int>(value->raw_value()->AsNumber());
+        byte code[] = {WASM_I32(val * size)};
+        current_function_builder_->EmitCode(code, sizeof(code));
+        return;
+      }
+      BinaryOperation* binop = expr->key()->AsBinaryOperation();
+      if (binop) {
+        DCHECK_EQ(Token::SAR, binop->op());
+        DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+        DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+        DCHECK_EQ(size,
+                  1 << static_cast<int>(
+                      binop->right()->AsLiteral()->raw_value()->AsNumber()));
+        // Mask bottom bits to match asm.js behavior.
+        current_function_builder_->Emit(kExprI32And);
+        byte code[] = {WASM_I8(~(size - 1))};
+        current_function_builder_->EmitCode(code, sizeof(code));
+        RECURSE(Visit(binop->left()));
+        return;
+      }
     }
     UNREACHABLE();
   }
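
VisitForeignVariable bakes the foreign object's properties into the module as constants at build time: a double-annotated import becomes an f64 constant via ToNumber, an int-annotated one becomes an i32 via ToInt32, and a missing or uncoercible value silently defaults to NaN or 0. A plain C++ analogue of that defaulting, with std::nullopt modeling the missing case:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>
#include <optional>

double ForeignDouble(std::optional<double> v) {
  return v ? *v : std::numeric_limits<double>::quiet_NaN();
}

int32_t ForeignInt(std::optional<double> v) {
  return v ? static_cast<int32_t>(*v) : 0;
}

int main() {
  assert(ForeignInt(7.0) == 7);
  assert(ForeignInt(std::nullopt) == 0);          // missing int import -> 0
  assert(std::isnan(ForeignDouble(std::nullopt)));  // missing double -> NaN
  return 0;
}
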
@@ -565,18 +770,54 @@
     switch (call_type) {
       case Call::OTHER_CALL: {
         DCHECK(in_function_);
-        current_function_builder_->Emit(kExprCallFunction);
-        RECURSE(Visit(expr->expression()));
-        ZoneList<Expression*>* args = expr->arguments();
-        for (int i = 0; i < args->length(); ++i) {
-          Expression* arg = args->at(i);
-          RECURSE(Visit(arg));
+        uint16_t index;
+        VariableProxy* vp = expr->expression()->AsVariableProxy();
+        if (vp != nullptr &&
+            Type::Any()->Is(vp->bounds().lower->AsFunction()->Result())) {
+          LocalType return_type = TypeOf(expr);
+          ZoneList<Expression*>* args = expr->arguments();
+          FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+                                   args->length());
+          if (return_type != kAstStmt) {
+            sig.AddReturn(return_type);
+          }
+          for (int i = 0; i < args->length(); i++) {
+            sig.AddParam(TypeOf(args->at(i)));
+          }
+          index =
+              imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
+        } else {
+          index = LookupOrInsertFunction(vp->var());
         }
+        current_function_builder_->Emit(kExprCallFunction);
+        std::vector<uint8_t> index_arr = UnsignedLEB128From(index);
+        current_function_builder_->EmitCode(
+            &index_arr[0], static_cast<uint32_t>(index_arr.size()));
+        break;
+      }
+      case Call::KEYED_PROPERTY_CALL: {
+        DCHECK(in_function_);
+        Property* p = expr->expression()->AsProperty();
+        DCHECK_NOT_NULL(p);
+        VariableProxy* var = p->obj()->AsVariableProxy();
+        DCHECK_NOT_NULL(var);
+        FunctionTableIndices* indices = LookupFunctionTable(var->var());
+        current_function_builder_->EmitWithU8(kExprCallIndirect,
+                                              indices->signature_index);
+        current_function_builder_->Emit(kExprI32Add);
+        byte code[] = {WASM_I32(indices->start_index)};
+        current_function_builder_->EmitCode(code, sizeof(code));
+        RECURSE(Visit(p->key()));
         break;
       }
       default:
         UNREACHABLE();
     }
+    ZoneList<Expression*>* args = expr->arguments();
+    for (int i = 0; i < args->length(); ++i) {
+      Expression* arg = args->at(i);
+      RECURSE(Visit(arg));
+    }
   }
 
   void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
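
Direct calls above emit the callee index with UnsignedLEB128From. Unsigned LEB128 packs 7 bits per byte, setting the high bit on every byte except the last; a sketch matching the standard encoding:

#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> UnsignedLEB128From(uint32_t value) {
  std::vector<uint8_t> out;
  do {
    uint8_t b = value & 0x7f;   // low 7 bits of the remaining value
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit on non-final bytes
    out.push_back(b);
  } while (value != 0);
  return out;
}

int main() {
  assert(UnsignedLEB128From(0).size() == 1);  // {0x00}
  std::vector<uint8_t> v = UnsignedLEB128From(624485);
  assert(v.size() == 3 && v[0] == 0xE5 && v[1] == 0x8E && v[2] == 0x26);
  return 0;
}
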
@@ -586,7 +827,7 @@
   void VisitUnaryOperation(UnaryOperation* expr) {
     switch (expr->op()) {
       case Token::NOT: {
-        DCHECK(TypeOf(expr->expression()) == kAstI32);
+        DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
         current_function_builder_->Emit(kExprBoolNot);
         break;
       }
@@ -600,7 +841,7 @@
 
   bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
                                int32_t val) {
-    DCHECK(expr->right() != nullptr);
+    DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
         TypeOf(expr) == kAstI32) {
       Literal* right = expr->right()->AsLiteral();
@@ -614,7 +855,7 @@
 
   bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
                                   double val) {
-    DCHECK(expr->right() != nullptr);
+    DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
         TypeOf(expr) == kAstF64) {
       Literal* right = expr->right()->AsLiteral();
@@ -629,8 +870,9 @@
   enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
 
   ConvertOperation MatchOr(BinaryOperation* expr) {
-    if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0)) {
-      return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+    if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
+        (TypeOf(expr->left()) == kAstI32)) {
+      return kAsIs;
     } else {
       return kNone;
     }
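
MatchOr, MatchXor, and MatchMul recognize the asm.js coercion idioms x|0 (to signed int), ~~x lowered to (x ^ -1) ^ -1 (to int), and x * 1.0 (to double); the MatchOr change above stops treating |0 on an operand that is not already int32 as a conversion. The idioms work because each operation is an identity on its target type:

#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -5;
  assert((x | 0) == x);          // |0 is the identity on int32
  assert(((x ^ -1) ^ -1) == x);  // xor with all-ones twice (double ~) too
  double d = 2.5;
  assert(d * 1.0 == d);          // *1.0 is the identity on finite doubles
  return 0;
}
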
@@ -647,12 +889,12 @@
 
   ConvertOperation MatchXor(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
-      DCHECK(TypeOf(expr->left()) == kAstI32);
-      DCHECK(TypeOf(expr->right()) == kAstI32);
+      DCHECK_EQ(kAstI32, TypeOf(expr->left()));
+      DCHECK_EQ(kAstI32, TypeOf(expr->right()));
       BinaryOperation* op = expr->left()->AsBinaryOperation();
       if (op != nullptr) {
         if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
-          DCHECK(TypeOf(op->right()) == kAstI32);
+          DCHECK_EQ(kAstI32, TypeOf(op->right()));
           if (TypeOf(op->left()) != kAstI32) {
             return kToInt;
           } else {
@@ -666,7 +908,7 @@
 
   ConvertOperation MatchMul(BinaryOperation* expr) {
     if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
-      DCHECK(TypeOf(expr->right()) == kAstF64);
+      DCHECK_EQ(kAstF64, TypeOf(expr->right()));
       if (TypeOf(expr->left()) != kAstF64) {
         return kToDouble;
       } else {
@@ -768,6 +1010,7 @@
         BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
         BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
         BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
+        BINOP_CASE(Token::BIT_AND, And, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
         BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
@@ -786,6 +1029,10 @@
           }
           break;
         }
+        case Token::COMMA: {
+          current_function_builder_->EmitWithU8(kExprBlock, 2);
+          break;
+        }
         default:
           UNREACHABLE();
       }
@@ -879,8 +1126,8 @@
   }
 
   TypeIndex TypeIndexOf(Expression* expr) {
-    DCHECK(expr->bounds().lower == expr->bounds().upper);
-    TypeImpl<ZoneTypeConfig>* type = expr->bounds().lower;
+    DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
+    Type* type = expr->bounds().lower;
     if (type->Is(cache_.kAsmFixnum)) {
       return kFixnum;
     } else if (type->Is(cache_.kAsmSigned)) {
@@ -929,17 +1176,14 @@
 
   void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
 
-  void VisitRewritableAssignmentExpression(
-      RewritableAssignmentExpression* expr) {
-    UNREACHABLE();
-  }
+  void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
 
   struct IndexContainer : public ZoneObject {
     uint16_t index;
   };
 
   uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
-    DCHECK(current_function_builder_ != nullptr);
+    DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
         local_variables_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
@@ -974,7 +1218,7 @@
   }
 
   uint16_t LookupOrInsertFunction(Variable* v) {
-    DCHECK(builder_ != nullptr);
+    DCHECK_NOT_NULL(builder_);
     ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
       uint16_t index = builder_->AddFunction();
@@ -988,11 +1232,11 @@
   }
 
   LocalType TypeOf(Expression* expr) {
-    DCHECK(expr->bounds().lower == expr->bounds().upper);
+    DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
     return TypeFrom(expr->bounds().lower);
   }
 
-  LocalType TypeFrom(TypeImpl<ZoneTypeConfig>* type) {
+  LocalType TypeFrom(Type* type) {
     if (type->Is(cache_.kAsmInt)) {
       return kAstI32;
     } else if (type->Is(cache_.kAsmFloat)) {
@@ -1017,10 +1261,14 @@
   FunctionLiteral* literal_;
   Isolate* isolate_;
   Zone* zone_;
+  Handle<Object> foreign_;
   TypeCache const& cache_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
   int block_size_;
-  uint16_t init_function_index;
+  uint16_t init_function_index_;
+  uint32_t next_table_index_;
+  ZoneHashMap function_tables_;
+  ImportedFunctionTable imported_function_table_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
@@ -1029,13 +1277,13 @@
 };
 
 AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
-                               FunctionLiteral* literal)
-    : isolate_(isolate), zone_(zone), literal_(literal) {}
+                               FunctionLiteral* literal, Handle<Object> foreign)
+    : isolate_(isolate), zone_(zone), literal_(literal), foreign_(foreign) {}
 
 // TODO(aseemgarg): probably should take zone (to write wasm to) as input so
 // that zone in constructor may be thrown away once wasm module is written.
 WasmModuleIndex* AsmWasmBuilder::Run() {
-  AsmWasmBuilderImpl impl(isolate_, zone_, literal_);
+  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_);
   impl.Compile();
   WasmModuleWriter* writer = impl.builder_->Build(zone_);
   return writer->WriteTo(zone_);
diff --git a/src/wasm/asm-wasm-builder.h b/src/wasm/asm-wasm-builder.h
index cb568db..9b761f9 100644
--- a/src/wasm/asm-wasm-builder.h
+++ b/src/wasm/asm-wasm-builder.h
@@ -6,6 +6,7 @@
 #define V8_WASM_ASM_WASM_BUILDER_H_
 
 #include "src/allocation.h"
+#include "src/objects.h"
 #include "src/wasm/encoder.h"
 #include "src/zone.h"
 
@@ -18,13 +19,15 @@
 
 class AsmWasmBuilder {
  public:
-  explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+  explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
+                          Handle<Object> foreign);
   WasmModuleIndex* Run();
 
  private:
   Isolate* isolate_;
   Zone* zone_;
   FunctionLiteral* literal_;
+  Handle<Object> foreign_;
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/ast-decoder.cc
index ffb8157..c97c781 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/ast-decoder.cc
@@ -5,6 +5,7 @@
 #include "src/base/platform/elapsed-timer.h"
 #include "src/signature.h"
 
+#include "src/bit-vector.h"
 #include "src/flags.h"
 #include "src/handles.h"
 #include "src/zone-containers.h"
@@ -40,7 +41,6 @@
   WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
 };
 
-
 // A production represents an incomplete decoded tree in the LR decoder.
 struct Production {
   Tree* tree;  // the root of the syntax tree.
@@ -97,13 +97,278 @@
 #define BUILD0(func) (build() ? builder_->func() : nullptr)
 
 
+// Generic Wasm bytecode decoder with utilities for decoding operands,
+// lengths, etc.
+class WasmDecoder : public Decoder {
+ public:
+  WasmDecoder() : Decoder(nullptr, nullptr), function_env_(nullptr) {}
+  WasmDecoder(FunctionEnv* env, const byte* start, const byte* end)
+      : Decoder(start, end), function_env_(env) {}
+  FunctionEnv* function_env_;
+
+  void Reset(FunctionEnv* function_env, const byte* start, const byte* end) {
+    Decoder::Reset(start, end);
+    function_env_ = function_env;
+  }
+
+  byte ByteOperand(const byte* pc, const char* msg = "missing 1-byte operand") {
+    if ((pc + sizeof(byte)) >= limit_) {
+      error(pc, msg);
+      return 0;
+    }
+    return pc[1];
+  }
+
+  uint32_t Uint32Operand(const byte* pc) {
+    if ((pc + sizeof(uint32_t)) >= limit_) {
+      error(pc, "missing 4-byte operand");
+      return 0;
+    }
+    return read_u32(pc + 1);
+  }
+
+  uint64_t Uint64Operand(const byte* pc) {
+    if ((pc + sizeof(uint64_t)) >= limit_) {
+      error(pc, "missing 8-byte operand");
+      return 0;
+    }
+    return read_u64(pc + 1);
+  }
+
+  inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
+    if (operand.index < function_env_->total_locals) {
+      operand.type = function_env_->GetLocalType(operand.index);
+      return true;
+    }
+    error(pc, pc + 1, "invalid local index");
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
+    ModuleEnv* m = function_env_->module;
+    if (m && m->module && operand.index < m->module->globals->size()) {
+      operand.machine_type = m->module->globals->at(operand.index).type;
+      operand.type = WasmOpcodes::LocalTypeFor(operand.machine_type);
+      return true;
+    }
+    error(pc, pc + 1, "invalid global index");
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
+    ModuleEnv* m = function_env_->module;
+    if (m && m->module && operand.index < m->module->functions->size()) {
+      operand.sig = m->module->functions->at(operand.index).sig;
+      return true;
+    }
+    error(pc, pc + 1, "invalid function index");
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
+    ModuleEnv* m = function_env_->module;
+    if (m && m->module && operand.index < m->module->signatures->size()) {
+      operand.sig = m->module->signatures->at(operand.index);
+      return true;
+    }
+    error(pc, pc + 1, "invalid signature index");
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
+    ModuleEnv* m = function_env_->module;
+    if (m && m->module && operand.index < m->module->import_table->size()) {
+      operand.sig = m->module->import_table->at(operand.index).sig;
+      return true;
+    }
+    error(pc, pc + 1, "invalid signature index");
+    return false;
+  }
+
+  inline bool Validate(const byte* pc, BreakDepthOperand& operand,
+                       ZoneVector<Block>& blocks) {
+    if (operand.depth < blocks.size()) {
+      operand.target = &blocks[blocks.size() - operand.depth - 1];
+      return true;
+    }
+    error(pc, pc + 1, "invalid break depth");
+    return false;
+  }
+
+  bool Validate(const byte* pc, TableSwitchOperand& operand,
+                size_t block_depth) {
+    if (operand.table_count == 0) {
+      error(pc, "tableswitch with 0 entries");
+      return false;
+    }
+    // Verify table.
+    for (uint32_t i = 0; i < operand.table_count; i++) {
+      uint16_t target = operand.read_entry(this, i);
+      if (target >= 0x8000) {
+        size_t depth = target - 0x8000;
+        if (depth > block_depth) {
+          error(operand.table + i * 2, "improper branch in tableswitch");
+          return false;
+        }
+      } else {
+        if (target >= operand.case_count) {
+          error(operand.table + i * 2, "invalid case target in tableswitch");
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  int OpcodeArity(const byte* pc) {
+#define DECLARE_ARITY(name, ...)                          \
+  static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
+  static const int kArity_##name =                        \
+      static_cast<int>(arraysize(kTypes_##name) - 1);
+
+    FOREACH_SIGNATURE(DECLARE_ARITY);
+#undef DECLARE_ARITY
+
+    switch (static_cast<WasmOpcode>(*pc)) {
+      case kExprI8Const:
+      case kExprI32Const:
+      case kExprI64Const:
+      case kExprF64Const:
+      case kExprF32Const:
+      case kExprGetLocal:
+      case kExprLoadGlobal:
+      case kExprNop:
+      case kExprUnreachable:
+        return 0;
+
+      case kExprBr:
+      case kExprStoreGlobal:
+      case kExprSetLocal:
+        return 1;
+
+      case kExprIf:
+      case kExprBrIf:
+        return 2;
+      case kExprIfElse:
+      case kExprSelect:
+        return 3;
+
+      case kExprBlock:
+      case kExprLoop: {
+        BlockCountOperand operand(this, pc);
+        return operand.count;
+      }
+
+      case kExprCallFunction: {
+        FunctionIndexOperand operand(this, pc);
+        return static_cast<int>(
+            function_env_->module->GetFunctionSignature(operand.index)
+                ->parameter_count());
+      }
+      case kExprCallIndirect: {
+        SignatureIndexOperand operand(this, pc);
+        return 1 + static_cast<int>(
+                       function_env_->module->GetSignature(operand.index)
+                           ->parameter_count());
+      }
+      case kExprCallImport: {
+        ImportIndexOperand operand(this, pc);
+        return static_cast<int>(
+            function_env_->module->GetImportSignature(operand.index)
+                ->parameter_count());
+      }
+      case kExprReturn: {
+        return static_cast<int>(function_env_->sig->return_count());
+      }
+      case kExprTableSwitch: {
+        TableSwitchOperand operand(this, pc);
+        return 1 + operand.case_count;
+      }
+
+#define DECLARE_OPCODE_CASE(name, opcode, sig) \
+  case kExpr##name:                            \
+    return kArity_##sig;
+
+        FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+        FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+        FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
+        FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+    }
+    UNREACHABLE();
+    return 0;
+  }
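DECLARE_ARITY derives each signature's operand count mechanically: the type array stores the return type followed by the parameter types, so the arity is the array length minus one. For a binary i32 signature (assuming FOREACH_SIGNATURE contains an entry named i_ii, per the usual naming scheme) the macro expands roughly to:

    // Hypothetical expansion for V(i_ii, kAstI32, kAstI32, kAstI32):
    static const LocalType kTypes_i_ii[] = {kAstI32, kAstI32, kAstI32};
    static const int kArity_i_ii =
        static_cast<int>(arraysize(kTypes_i_ii) - 1);  // == 2 operands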
+
+  int OpcodeLength(const byte* pc) {
+    switch (static_cast<WasmOpcode>(*pc)) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+      FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+      FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+      {
+        MemoryAccessOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprBlock:
+      case kExprLoop: {
+        BlockCountOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprBr:
+      case kExprBrIf: {
+        BreakDepthOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprStoreGlobal:
+      case kExprLoadGlobal: {
+        GlobalIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+
+      case kExprCallFunction: {
+        FunctionIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprCallIndirect: {
+        SignatureIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprCallImport: {
+        ImportIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+
+      case kExprSetLocal:
+      case kExprGetLocal: {
+        LocalIndexOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprTableSwitch: {
+        TableSwitchOperand operand(this, pc);
+        return 1 + operand.length;
+      }
+      case kExprI8Const:
+        return 2;
+      case kExprI32Const:
+      case kExprF32Const:
+        return 5;
+      case kExprI64Const:
+      case kExprF64Const:
+        return 9;
+
+      default:
+        return 1;
+    }
+  }
+};
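OpcodeLength always returns the full instruction size: one byte for the opcode plus whatever the operand struct consumed, which for the LEB128-encoded indices is itself variable. A worked example of that arithmetic, assuming a two-byte local index:

    // 0x85 0x02 is a two-byte unsigned LEB128:
    //   (0x85 & 0x7F) | (0x02 << 7) = 5 + 256 = 261,
    // so kExprGetLocal followed by these bytes spans 1 + 2 = 3 bytes.
    static_assert(((0x85 & 0x7F) | (0x02 << 7)) == 261,
                  "two-byte LEB128 example");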
+
+
 // A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
 // shift-reduce strategy with multiple internal stacks.
-class LR_WasmDecoder : public Decoder {
+class LR_WasmDecoder : public WasmDecoder {
  public:
   LR_WasmDecoder(Zone* zone, TFBuilder* builder)
-      : Decoder(nullptr, nullptr),
-        zone_(zone),
+      : zone_(zone),
         builder_(builder),
         trees_(zone),
         stack_(zone),
@@ -127,8 +392,7 @@
     }
 
     base_ = base;
-    Reset(pc, end);
-    function_env_ = function_env;
+    Reset(function_env, pc, end);
 
     InitSsaEnv();
     DecodeFunctionBody();
@@ -151,15 +415,20 @@
     }
 
     if (ok()) {
+      if (FLAG_trace_wasm_ast) {
+        PrintAst(function_env, pc, end);
+      }
       if (FLAG_trace_wasm_decode_time) {
         double ms = decode_timer.Elapsed().InMillisecondsF();
-        PrintF(" - decoding took %0.3f ms\n", ms);
+        PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
+      } else {
+        TRACE("wasm-decode ok\n\n");
       }
-      TRACE("wasm-decode ok\n\n");
     } else {
       TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
             startrel(error_pc_), error_msg_.get());
     }
+
     return toResult(tree);
   }
 
@@ -172,7 +441,6 @@
   TreeResult result_;
 
   SsaEnv* ssa_env_;
-  FunctionEnv* function_env_;
 
   ZoneVector<Tree*> trees_;
   ZoneVector<Production> stack_;
@@ -199,30 +467,30 @@
         ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
       }
       // Initialize int32 locals.
-      if (function_env_->local_int32_count > 0) {
+      if (function_env_->local_i32_count > 0) {
         TFNode* zero = builder_->Int32Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_int32_count; i++) {
+        for (uint32_t i = 0; i < function_env_->local_i32_count; i++) {
           ssa_env->locals[pos++] = zero;
         }
       }
       // Initialize int64 locals.
-      if (function_env_->local_int64_count > 0) {
+      if (function_env_->local_i64_count > 0) {
         TFNode* zero = builder_->Int64Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_int64_count; i++) {
+        for (uint32_t i = 0; i < function_env_->local_i64_count; i++) {
           ssa_env->locals[pos++] = zero;
         }
       }
       // Initialize float32 locals.
-      if (function_env_->local_float32_count > 0) {
+      if (function_env_->local_f32_count > 0) {
         TFNode* zero = builder_->Float32Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_float32_count; i++) {
+        for (uint32_t i = 0; i < function_env_->local_f32_count; i++) {
           ssa_env->locals[pos++] = zero;
         }
       }
       // Initialize float64 locals.
-      if (function_env_->local_float64_count > 0) {
+      if (function_env_->local_f64_count > 0) {
         TFNode* zero = builder_->Float64Constant(0);
-        for (uint32_t i = 0; i < function_env_->local_float64_count; i++) {
+        for (uint32_t i = 0; i < function_env_->local_f64_count; i++) {
           ssa_env->locals[pos++] = zero;
         }
       }
@@ -329,25 +597,25 @@
           Leaf(kAstStmt);
           break;
         case kExprBlock: {
-          int length = Operand<uint8_t>(pc_);
-          if (length < 1) {
+          BlockCountOperand operand(this, pc_);
+          if (operand.count < 1) {
             Leaf(kAstStmt);
           } else {
-            Shift(kAstEnd, length);
+            Shift(kAstEnd, operand.count);
             // The break environment is the outer environment.
             SsaEnv* break_env = ssa_env_;
             PushBlock(break_env);
             SetEnv("block:start", Steal(break_env));
           }
-          len = 2;
+          len = 1 + operand.length;
           break;
         }
         case kExprLoop: {
-          int length = Operand<uint8_t>(pc_);
-          if (length < 1) {
+          BlockCountOperand operand(this, pc_);
+          if (operand.count < 1) {
             Leaf(kAstStmt);
           } else {
-            Shift(kAstEnd, length);
+            Shift(kAstEnd, operand.count);
             // The break environment is the outer environment.
             SsaEnv* break_env = ssa_env_;
             PushBlock(break_env);
@@ -359,7 +627,7 @@
             PushBlock(cont_env);
             blocks_.back().stack_depth = -1;  // no production for inner block.
           }
-          len = 2;
+          len = 1 + operand.length;
           break;
         }
         case kExprIf:
@@ -372,59 +640,27 @@
           Shift(kAstStmt, 3);  // Result type is typeof(x) in {c ? x : y}.
           break;
         case kExprBr: {
-          uint32_t depth = Operand<uint8_t>(pc_);
-          Shift(kAstEnd, 1);
-          if (depth >= blocks_.size()) {
-            error("improperly nested branch");
+          BreakDepthOperand operand(this, pc_);
+          if (Validate(pc_, operand, blocks_)) {
+            Shift(kAstEnd, 1);
           }
-          len = 2;
+          len = 1 + operand.length;
           break;
         }
         case kExprBrIf: {
-          uint32_t depth = Operand<uint8_t>(pc_);
-          Shift(kAstStmt, 2);
-          if (depth >= blocks_.size()) {
-            error("improperly nested conditional branch");
+          BreakDepthOperand operand(this, pc_);
+          if (Validate(pc_, operand, blocks_)) {
+            Shift(kAstStmt, 2);
           }
-          len = 2;
+          len = 1 + operand.length;
           break;
         }
         case kExprTableSwitch: {
-          if (!checkAvailable(5)) {
-            error("expected #tableswitch <cases> <table>, fell off end");
-            break;
+          TableSwitchOperand operand(this, pc_);
+          if (Validate(pc_, operand, blocks_.size())) {
+            Shift(kAstEnd, 1 + operand.case_count);
           }
-          uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc_ + 1);
-          uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc_ + 3);
-          len = 5 + table_count * 2;
-
-          if (table_count == 0) {
-            error("tableswitch with 0 entries");
-            break;
-          }
-
-          if (!checkAvailable(len)) {
-            error("expected #tableswitch <cases> <table>, fell off end");
-            break;
-          }
-
-          Shift(kAstEnd, 1 + case_count);
-
-          // Verify table.
-          for (int i = 0; i < table_count; i++) {
-            uint16_t target =
-                *reinterpret_cast<const uint16_t*>(pc_ + 5 + i * 2);
-            if (target >= 0x8000) {
-              size_t depth = target - 0x8000;
-              if (depth > blocks_.size()) {
-                error(pc_ + 5 + i * 2, "improper branch in tableswitch");
-              }
-            } else {
-              if (target >= case_count) {
-                error(pc_ + 5 + i * 2, "invalid case target in tableswitch");
-              }
-            }
-          }
+          len = 1 + operand.length;
           break;
         }
         case kExprReturn: {
@@ -445,59 +681,66 @@
           break;
         }
         case kExprI8Const: {
-          int32_t value = Operand<int8_t>(pc_);
-          Leaf(kAstI32, BUILD(Int32Constant, value));
-          len = 2;
+          ImmI8Operand operand(this, pc_);
+          Leaf(kAstI32, BUILD(Int32Constant, operand.value));
+          len = 1 + operand.length;
           break;
         }
         case kExprI32Const: {
-          int32_t value = Operand<int32_t>(pc_);
-          Leaf(kAstI32, BUILD(Int32Constant, value));
-          len = 5;
+          ImmI32Operand operand(this, pc_);
+          Leaf(kAstI32, BUILD(Int32Constant, operand.value));
+          len = 1 + operand.length;
           break;
         }
         case kExprI64Const: {
-          int64_t value = Operand<int64_t>(pc_);
-          Leaf(kAstI64, BUILD(Int64Constant, value));
-          len = 9;
+          ImmI64Operand operand(this, pc_);
+          Leaf(kAstI64, BUILD(Int64Constant, operand.value));
+          len = 1 + operand.length;
           break;
         }
         case kExprF32Const: {
-          float value = Operand<float>(pc_);
-          Leaf(kAstF32, BUILD(Float32Constant, value));
-          len = 5;
+          ImmF32Operand operand(this, pc_);
+          Leaf(kAstF32, BUILD(Float32Constant, operand.value));
+          len = 1 + operand.length;
           break;
         }
         case kExprF64Const: {
-          double value = Operand<double>(pc_);
-          Leaf(kAstF64, BUILD(Float64Constant, value));
-          len = 9;
+          ImmF64Operand operand(this, pc_);
+          Leaf(kAstF64, BUILD(Float64Constant, operand.value));
+          len = 1 + operand.length;
           break;
         }
         case kExprGetLocal: {
-          uint32_t index;
-          LocalType type = LocalOperand(pc_, &index, &len);
-          TFNode* val =
-              build() && type != kAstStmt ? ssa_env_->locals[index] : nullptr;
-          Leaf(type, val);
+          LocalIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            TFNode* val = build() ? ssa_env_->locals[operand.index] : nullptr;
+            Leaf(operand.type, val);
+          }
+          len = 1 + operand.length;
           break;
         }
         case kExprSetLocal: {
-          uint32_t index;
-          LocalType type = LocalOperand(pc_, &index, &len);
-          Shift(type, 1);
+          LocalIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            Shift(operand.type, 1);
+          }
+          len = 1 + operand.length;
           break;
         }
         case kExprLoadGlobal: {
-          uint32_t index;
-          LocalType type = GlobalOperand(pc_, &index, &len);
-          Leaf(type, BUILD(LoadGlobal, index));
+          GlobalIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            Leaf(operand.type, BUILD(LoadGlobal, operand.index));
+          }
+          len = 1 + operand.length;
           break;
         }
         case kExprStoreGlobal: {
-          uint32_t index;
-          LocalType type = GlobalOperand(pc_, &index, &len);
-          Shift(type, 1);
+          GlobalIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            Shift(operand.type, 1);
+          }
+          len = 1 + operand.length;
           break;
         }
         case kExprI32LoadMem8S:
@@ -546,27 +789,36 @@
           Shift(kAstI32, 1);
           break;
         case kExprCallFunction: {
-          uint32_t unused;
-          FunctionSig* sig = FunctionSigOperand(pc_, &unused, &len);
-          if (sig) {
-            LocalType type =
-                sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
-            Shift(type, static_cast<int>(sig->parameter_count()));
-          } else {
-            Leaf(kAstI32);  // error
+          FunctionIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            LocalType type = operand.sig->return_count() == 0
+                                 ? kAstStmt
+                                 : operand.sig->GetReturn();
+            Shift(type, static_cast<int>(operand.sig->parameter_count()));
           }
+          len = 1 + operand.length;
           break;
         }
         case kExprCallIndirect: {
-          uint32_t unused;
-          FunctionSig* sig = SigOperand(pc_, &unused, &len);
-          if (sig) {
-            LocalType type =
-                sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
-            Shift(type, static_cast<int>(1 + sig->parameter_count()));
-          } else {
-            Leaf(kAstI32);  // error
+          SignatureIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            LocalType type = operand.sig->return_count() == 0
+                                 ? kAstStmt
+                                 : operand.sig->GetReturn();
+            Shift(type, static_cast<int>(1 + operand.sig->parameter_count()));
           }
+          len = 1 + operand.length;
+          break;
+        }
+        case kExprCallImport: {
+          ImportIndexOperand operand(this, pc_);
+          if (Validate(pc_, operand)) {
+            LocalType type = operand.sig->return_count() == 0
+                                 ? kAstStmt
+                                 : operand.sig->GetReturn();
+            Shift(type, static_cast<int>(operand.sig->parameter_count()));
+          }
+          len = 1 + operand.length;
           break;
         }
         default:
@@ -589,19 +841,15 @@
   }
 
   int DecodeLoadMem(const byte* pc, LocalType type) {
-    int length = 2;
-    uint32_t offset;
-    MemoryAccessOperand(pc, &length, &offset);
+    MemoryAccessOperand operand(this, pc);
     Shift(type, 1);
-    return length;
+    return 1 + operand.length;
   }
 
   int DecodeStoreMem(const byte* pc, LocalType type) {
-    int length = 2;
-    uint32_t offset;
-    MemoryAccessOperand(pc, &length, &offset);
+    MemoryAccessOperand operand(this, pc);
     Shift(type, 2);
-    return length;
+    return 1 + operand.length;
   }
 
   void AddImplicitReturnAtEnd() {
@@ -747,26 +995,26 @@
       }
       case kExprSelect: {
         if (p->index == 1) {
-          // Condition done.
-          TypeCheckLast(p, kAstI32);
-        } else if (p->index == 2) {
           // True expression done.
           p->tree->type = p->last()->type;
           if (p->tree->type == kAstStmt) {
             error(p->pc(), p->tree->children[1]->pc,
                   "select operand should be expression");
           }
-        } else {
+        } else if (p->index == 2) {
           // False expression done.
-          DCHECK(p->done());
           TypeCheckLast(p, p->tree->type);
+        } else {
+          // Condition done.
+          DCHECK(p->done());
+          TypeCheckLast(p, kAstI32);
           if (build()) {
             TFNode* controls[2];
-            builder_->Branch(p->tree->children[0]->node, &controls[0],
+            builder_->Branch(p->tree->children[2]->node, &controls[0],
                              &controls[1]);
             TFNode* merge = builder_->Merge(2, controls);
-            TFNode* vals[2] = {p->tree->children[1]->node,
-                               p->tree->children[2]->node};
+            TFNode* vals[2] = {p->tree->children[0]->node,
+                               p->tree->children[1]->node};
             TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
             p->tree->node = phi;
             ssa_env_->control = merge;
@@ -775,64 +1023,44 @@
         break;
       }
       case kExprBr: {
-        uint32_t depth = Operand<uint8_t>(p->pc());
-        if (depth >= blocks_.size()) {
-          error("improperly nested branch");
-          break;
-        }
-        Block* block = &blocks_[blocks_.size() - depth - 1];
-        ReduceBreakToExprBlock(p, block);
+        BreakDepthOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand, blocks_));
+        ReduceBreakToExprBlock(p, operand.target);
         break;
       }
       case kExprBrIf: {
-        if (p->index == 1) {
+        if (p->done()) {
           TypeCheckLast(p, kAstI32);
-        } else if (p->done()) {
-          uint32_t depth = Operand<uint8_t>(p->pc());
-          if (depth >= blocks_.size()) {
-            error("improperly nested branch");
-            break;
-          }
-          Block* block = &blocks_[blocks_.size() - depth - 1];
+          BreakDepthOperand operand(this, p->pc());
+          CHECK(Validate(p->pc(), operand, blocks_));
           SsaEnv* fenv = ssa_env_;
           SsaEnv* tenv = Split(fenv);
-          BUILD(Branch, p->tree->children[0]->node, &tenv->control,
+          BUILD(Branch, p->tree->children[1]->node, &tenv->control,
                 &fenv->control);
           ssa_env_ = tenv;
-          ReduceBreakToExprBlock(p, block);
+          ReduceBreakToExprBlock(p, operand.target, p->tree->children[0]);
           ssa_env_ = fenv;
         }
         break;
       }
       case kExprTableSwitch: {
-        uint16_t table_count = *reinterpret_cast<const uint16_t*>(p->pc() + 3);
-        if (table_count == 1) {
-          // Degenerate switch with only a default target.
-          if (p->index == 1) {
-            SsaEnv* break_env = ssa_env_;
-            PushBlock(break_env);
-            SetEnv("switch:default", Steal(break_env));
-          }
-          if (p->done()) {
-            Block* block = &blocks_.back();
-            // fall through to the end.
-            ReduceBreakToExprBlock(p, block);
-            SetEnv("switch:end", block->ssa_env);
-            blocks_.pop_back();
-          }
-          break;
-        }
-
         if (p->index == 1) {
           // Switch key finished.
           TypeCheckLast(p, kAstI32);
+          if (failed()) break;
 
-          TFNode* sw = BUILD(Switch, table_count, p->last()->node);
+          TableSwitchOperand operand(this, p->pc());
+          DCHECK(Validate(p->pc(), operand, blocks_.size()));
+
+          // Build the switch only if it has more than just a default target.
+          bool build_switch = operand.table_count > 1;
+          TFNode* sw = nullptr;
+          if (build_switch)
+            sw = BUILD(Switch, operand.table_count, p->last()->node);
 
           // Allocate environments for each case.
-          uint16_t case_count = *reinterpret_cast<const uint16_t*>(p->pc() + 1);
-          SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(case_count);
-          for (int i = 0; i < case_count; i++) {
+          SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(operand.case_count);
+          for (uint32_t i = 0; i < operand.case_count; i++) {
             case_envs[i] = UnreachableEnv();
           }
 
@@ -843,13 +1071,15 @@
           ssa_env_ = copy;
 
           // Build the environments for each case based on the table.
-          const uint16_t* table =
-              reinterpret_cast<const uint16_t*>(p->pc() + 5);
-          for (int i = 0; i < table_count; i++) {
-            uint16_t target = table[i];
-            SsaEnv* env = Split(copy);
-            env->control = (i == table_count - 1) ? BUILD(IfDefault, sw)
-                                                  : BUILD(IfValue, i, sw);
+          for (uint32_t i = 0; i < operand.table_count; i++) {
+            uint16_t target = operand.read_entry(this, i);
+            SsaEnv* env = copy;
+            if (build_switch) {
+              env = Split(env);
+              env->control = (i == operand.table_count - 1)
+                                 ? BUILD(IfDefault, sw)
+                                 : BUILD(IfValue, i, sw);
+            }
             if (target >= 0x8000) {
               // Targets an outer block.
               int depth = target - 0x8000;
@@ -860,25 +1090,21 @@
               Goto(env, case_envs[target]);
             }
           }
+        }
 
-          // Switch to the environment for the first case.
-          SetEnv("switch:case", case_envs[0]);
+        if (p->done()) {
+          // Last case. Fall through to the end.
+          Block* block = &blocks_.back();
+          if (p->index > 1) ReduceBreakToExprBlock(p, block);
+          SsaEnv* next = block->ssa_env;
+          blocks_.pop_back();
+          ifs_.pop_back();
+          SetEnv("switch:end", next);
         } else {
-          // Switch case finished.
-          if (p->done()) {
-            // Last case. Fall through to the end.
-            Block* block = &blocks_.back();
-            ReduceBreakToExprBlock(p, block);
-            SsaEnv* next = block->ssa_env;
-            blocks_.pop_back();
-            ifs_.pop_back();
-            SetEnv("switch:end", next);
-          } else {
-            // Interior case. Maybe fall through to the next case.
-            SsaEnv* next = ifs_.back().case_envs[p->index - 1];
-            if (ssa_env_->go()) Goto(ssa_env_, next);
-            SetEnv("switch:case", next);
-          }
+          // Interior case. Maybe fall through to the next case.
+          SsaEnv* next = ifs_.back().case_envs[p->index - 1];
+          if (p->index > 1 && ssa_env_->go()) Goto(ssa_env_, next);
+          SetEnv("switch:case", next);
         }
         break;
       }
@@ -898,12 +1124,11 @@
         break;
       }
       case kExprSetLocal: {
-        int unused = 0;
-        uint32_t index;
-        LocalType type = LocalOperand(p->pc(), &index, &unused);
+        LocalIndexOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand));
         Tree* val = p->last();
-        if (type == val->type) {
-          if (build()) ssa_env_->locals[index] = val->node;
+        if (operand.type == val->type) {
+          if (build()) ssa_env_->locals[operand.index] = val->node;
           p->tree->node = val->node;
         } else {
           error(p->pc(), val->pc, "Typecheck failed in SetLocal");
@@ -911,12 +1136,11 @@
         break;
       }
       case kExprStoreGlobal: {
-        int unused = 0;
-        uint32_t index;
-        LocalType type = GlobalOperand(p->pc(), &index, &unused);
+        GlobalIndexOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand));
         Tree* val = p->last();
-        if (type == val->type) {
-          BUILD(StoreGlobal, index, val->node);
+        if (operand.type == val->type) {
+          BUILD(StoreGlobal, operand.index, val->node);
           p->tree->node = val->node;
         } else {
           error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
@@ -985,34 +1209,29 @@
         return;
 
       case kExprCallFunction: {
-        int len;
-        uint32_t index;
-        FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
-        if (!sig) break;
+        FunctionIndexOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand));
         if (p->index > 0) {
-          TypeCheckLast(p, sig->GetParam(p->index - 1));
+          TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
         }
         if (p->done() && build()) {
           uint32_t count = p->tree->count + 1;
           TFNode** buffer = builder_->Buffer(count);
-          FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
-          USE(sig);
           buffer[0] = nullptr;  // reserved for code object.
           for (uint32_t i = 1; i < count; i++) {
             buffer[i] = p->tree->children[i - 1]->node;
           }
-          p->tree->node = builder_->CallDirect(index, buffer);
+          p->tree->node = builder_->CallDirect(operand.index, buffer);
         }
         break;
       }
       case kExprCallIndirect: {
-        int len;
-        uint32_t index;
-        FunctionSig* sig = SigOperand(p->pc(), &index, &len);
+        SignatureIndexOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand));
         if (p->index == 1) {
           TypeCheckLast(p, kAstI32);
         } else {
-          TypeCheckLast(p, sig->GetParam(p->index - 2));
+          TypeCheckLast(p, operand.sig->GetParam(p->index - 2));
         }
         if (p->done() && build()) {
           uint32_t count = p->tree->count;
@@ -1020,7 +1239,24 @@
           for (uint32_t i = 0; i < count; i++) {
             buffer[i] = p->tree->children[i]->node;
           }
-          p->tree->node = builder_->CallIndirect(index, buffer);
+          p->tree->node = builder_->CallIndirect(operand.index, buffer);
+        }
+        break;
+      }
+      case kExprCallImport: {
+        ImportIndexOperand operand(this, p->pc());
+        CHECK(Validate(p->pc(), operand));
+        if (p->index > 0) {
+          TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
+        }
+        if (p->done() && build()) {
+          uint32_t count = p->tree->count + 1;
+          TFNode** buffer = builder_->Buffer(count);
+          buffer[0] = nullptr;  // reserved for code object.
+          for (uint32_t i = 1; i < count; i++) {
+            buffer[i] = p->tree->children[i - 1]->node;
+          }
+          p->tree->node = builder_->CallImport(operand.index, buffer);
         }
         break;
       }
@@ -1030,13 +1266,17 @@
   }
 
   void ReduceBreakToExprBlock(Production* p, Block* block) {
+    ReduceBreakToExprBlock(p, block, p->tree->count > 0 ? p->last() : nullptr);
+  }
+
+  void ReduceBreakToExprBlock(Production* p, Block* block, Tree* val) {
     if (block->stack_depth < 0) {
       // This is the inner loop block, which does not have a value.
       Goto(ssa_env_, block->ssa_env);
     } else {
       // Merge the value into the production for the block.
       Production* bp = &stack_[block->stack_depth];
-      MergeIntoProduction(bp, block->ssa_env, p->last());
+      MergeIntoProduction(bp, block->ssa_env, val);
     }
   }
 
@@ -1045,7 +1285,7 @@
 
     bool first = target->state == SsaEnv::kUnreachable;
     Goto(ssa_env_, target);
-    if (expr->type == kAstEnd) return;
+    if (expr == nullptr || expr->type == kAstEnd) return;
 
     if (first) {
       // first merge to this environment; set the type and the node.
@@ -1069,11 +1309,9 @@
     DCHECK_EQ(1, p->index);
     TypeCheckLast(p, kAstI32);  // index
     if (build()) {
-      int length = 0;
-      uint32_t offset = 0;
-      MemoryAccessOperand(p->pc(), &length, &offset);
+      MemoryAccessOperand operand(this, p->pc());
       p->tree->node =
-          builder_->LoadMem(type, mem_type, p->last()->node, offset);
+          builder_->LoadMem(type, mem_type, p->last()->node, operand.offset);
     }
   }
 
@@ -1084,11 +1322,10 @@
       DCHECK_EQ(2, p->index);
       TypeCheckLast(p, type);
       if (build()) {
-        int length = 0;
-        uint32_t offset = 0;
-        MemoryAccessOperand(p->pc(), &length, &offset);
+        MemoryAccessOperand operand(this, p->pc());
         TFNode* val = p->tree->children[1]->node;
-        builder_->StoreMem(mem_type, p->tree->children[0]->node, offset, val);
+        builder_->StoreMem(mem_type, p->tree->children[0]->node, operand.offset,
+                           val);
         p->tree->node = val;
       }
     }
@@ -1111,7 +1348,7 @@
   void SetEnv(const char* reason, SsaEnv* env) {
     TRACE("  env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
           static_cast<int>(blocks_.size()), reason);
-    if (env->control != nullptr && FLAG_trace_wasm_decoder) {
+    if (FLAG_trace_wasm_decoder && env && env->control) {
       TRACE(", control = ");
       compiler::WasmGraphBuilder::PrintDebugName(env->control);
     }
@@ -1286,94 +1523,11 @@
     return result;
   }
 
-  // Load an operand at [pc + 1].
-  template <typename V>
-  V Operand(const byte* pc) {
-    if ((limit_ - pc) < static_cast<int>(1 + sizeof(V))) {
-      const char* msg = "Expected operand following opcode";
-      switch (sizeof(V)) {
-        case 1:
-          msg = "Expected 1-byte operand following opcode";
-          break;
-        case 2:
-          msg = "Expected 2-byte operand following opcode";
-          break;
-        case 4:
-          msg = "Expected 4-byte operand following opcode";
-          break;
-        default:
-          break;
-      }
-      error(pc, msg);
-      return -1;
-    }
-    return *reinterpret_cast<const V*>(pc + 1);
-  }
-
   int EnvironmentCount() {
     if (builder_) return static_cast<int>(function_env_->GetLocalCount());
     return 0;  // if we aren't building a graph, don't bother with SSA renaming.
   }
 
-  LocalType LocalOperand(const byte* pc, uint32_t* index, int* length) {
-    *index = UnsignedLEB128Operand(pc, length);
-    if (function_env_->IsValidLocal(*index)) {
-      return function_env_->GetLocalType(*index);
-    }
-    error(pc, "invalid local variable index");
-    return kAstStmt;
-  }
-
-  LocalType GlobalOperand(const byte* pc, uint32_t* index, int* length) {
-    *index = UnsignedLEB128Operand(pc, length);
-    if (function_env_->module->IsValidGlobal(*index)) {
-      return WasmOpcodes::LocalTypeFor(
-          function_env_->module->GetGlobalType(*index));
-    }
-    error(pc, "invalid global variable index");
-    return kAstStmt;
-  }
-
-  FunctionSig* FunctionSigOperand(const byte* pc, uint32_t* index,
-                                  int* length) {
-    *index = UnsignedLEB128Operand(pc, length);
-    if (function_env_->module->IsValidFunction(*index)) {
-      return function_env_->module->GetFunctionSignature(*index);
-    }
-    error(pc, "invalid function index");
-    return nullptr;
-  }
-
-  FunctionSig* SigOperand(const byte* pc, uint32_t* index, int* length) {
-    *index = UnsignedLEB128Operand(pc, length);
-    if (function_env_->module->IsValidSignature(*index)) {
-      return function_env_->module->GetSignature(*index);
-    }
-    error(pc, "invalid signature index");
-    return nullptr;
-  }
-
-  uint32_t UnsignedLEB128Operand(const byte* pc, int* length) {
-    uint32_t result = 0;
-    ReadUnsignedLEB128ErrorCode error_code =
-        ReadUnsignedLEB128Operand(pc + 1, limit_, length, &result);
-    if (error_code == kInvalidLEB128) error(pc, "invalid LEB128 varint");
-    if (error_code == kMissingLEB128) error(pc, "expected LEB128 varint");
-    (*length)++;
-    return result;
-  }
-
-  void MemoryAccessOperand(const byte* pc, int* length, uint32_t* offset) {
-    byte bitfield = Operand<uint8_t>(pc);
-    if (MemoryAccess::OffsetField::decode(bitfield)) {
-      *offset = UnsignedLEB128Operand(pc + 1, length);
-      (*length)++;  // to account for the memory access byte
-    } else {
-      *offset = 0;
-      *length = 2;
-    }
-  }
-
   virtual void onFirstError() {
     limit_ = start_;     // Terminate decoding loop.
     builder_ = nullptr;  // Don't build any more nodes.
@@ -1447,137 +1601,114 @@
                                                       const byte* limit,
                                                       int* length,
                                                       uint32_t* result) {
-  *result = 0;
-  const byte* ptr = pc;
-  const byte* end = pc + 5;  // maximum 5 bytes.
-  if (end > limit) end = limit;
-  int shift = 0;
-  byte b = 0;
-  while (ptr < end) {
-    b = *ptr++;
-    *result = *result | ((b & 0x7F) << shift);
-    if ((b & 0x80) == 0) break;
-    shift += 7;
-  }
-  DCHECK_LE(ptr - pc, 5);
-  *length = static_cast<int>(ptr - pc);
-  if (ptr == end && (b & 0x80)) {
-    return kInvalidLEB128;
-  } else if (*length == 0) {
-    return kMissingLEB128;
-  } else {
-    return kNoError;
+  Decoder decoder(pc, limit);
+  *result = decoder.checked_read_u32v(pc, 0, length);
+  if (decoder.ok()) return kNoError;
+  return (limit - pc) > 1 ? kInvalidLEB128 : kMissingLEB128;
+}
+
+int OpcodeLength(const byte* pc, const byte* end) {
+  WasmDecoder decoder(nullptr, pc, end);
+  return decoder.OpcodeLength(pc);
+}
+
+int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end) {
+  WasmDecoder decoder(env, pc, end);
+  return decoder.OpcodeArity(pc);
+}
+
+void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
+  WasmDecoder decoder(env, start, end);
+  const byte* pc = start;
+  std::vector<int> arity_stack;
+  while (pc < end) {
+    int arity = decoder.OpcodeArity(pc);
+    size_t length = decoder.OpcodeLength(pc);
+
+    for (size_t i = 0; i < arity_stack.size(); i++) {
+      printf("  ");  // two spaces of indentation per open subtree
+    }
+
+    WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+    printf("k%s,", WasmOpcodes::OpcodeName(opcode));
+
+    for (size_t i = 1; i < length; i++) {
+      printf(" 0x%02x,", pc[i]);
+    }
+    pc += length;
+    printf("\n");
+
+    arity_stack.push_back(arity);
+    while (arity_stack.back() == 0) {
+      arity_stack.pop_back();
+      if (arity_stack.empty()) break;
+      arity_stack.back()--;
+    }
   }
 }
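The arity stack is what turns the flat byte stream back into an indented tree: each printed opcode pushes its operand count, a zero on top means the current subtree is finished, and popping it decrements the pending count of the parent. Assuming OpcodeName yields the macro names, the body {kExprI32Add, kExprI8Const, 1, kExprI8Const, 2} would print along these lines:

    // kI32Add,
    //   kI8Const, 0x01,
    //   kI8Const, 0x02,
    //
    // I32Add pushes arity 2; each constant pushes arity 0, which closes
    // its own subtree and consumes one pending operand of I32Add.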
 
-
-int OpcodeLength(const byte* pc) {
-  switch (static_cast<WasmOpcode>(*pc)) {
-#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
-    FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
-    FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
-
-    case kExprI8Const:
-    case kExprBlock:
-    case kExprLoop:
-    case kExprBr:
-    case kExprBrIf:
-      return 2;
-    case kExprI32Const:
-    case kExprF32Const:
-      return 5;
-    case kExprI64Const:
-    case kExprF64Const:
-      return 9;
-    case kExprStoreGlobal:
-    case kExprSetLocal:
-    case kExprLoadGlobal:
-    case kExprCallFunction:
-    case kExprCallIndirect:
-    case kExprGetLocal: {
-      int length;
-      uint32_t result = 0;
-      ReadUnsignedLEB128Operand(pc + 1, pc + 6, &length, &result);
-      return 1 + length;
-    }
-    case kExprTableSwitch: {
-      uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc + 3);
-      return 5 + table_count * 2;
-    }
-
-    default:
-      return 1;
+// Analyzes loop bodies for static assignments to locals, which helps in
+// reducing the number of phis introduced at loop headers.
+class LoopAssignmentAnalyzer : public WasmDecoder {
+ public:
+  LoopAssignmentAnalyzer(Zone* zone, FunctionEnv* function_env) : zone_(zone) {
+    function_env_ = function_env;
   }
+
+  BitVector* Analyze(const byte* pc, const byte* limit) {
+    Decoder::Reset(pc, limit);
+    if (pc_ >= limit_) return nullptr;
+    if (*pc_ != kExprLoop) return nullptr;
+
+    BitVector* assigned =
+        new (zone_) BitVector(function_env_->total_locals, zone_);
+    // Keep a stack to model the nesting of expressions.
+    std::vector<int> arity_stack;
+    arity_stack.push_back(OpcodeArity(pc_));
+    pc_ += OpcodeLength(pc_);
+
+    // Iteratively process all AST nodes nested inside the loop.
+    while (pc_ < limit_) {
+      WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+      int arity = 0;
+      int length = 1;
+      if (opcode == kExprSetLocal) {
+        LocalIndexOperand operand(this, pc_);
+        if (assigned->length() > 0 &&
+            static_cast<int>(operand.index) < assigned->length()) {
+          // Unverified code might have an out-of-bounds index.
+          assigned->Add(operand.index);
+        }
+        arity = 1;
+        length = 1 + operand.length;
+      } else {
+        arity = OpcodeArity(pc_);
+        length = OpcodeLength(pc_);
+      }
+
+      pc_ += length;
+      arity_stack.push_back(arity);
+      while (arity_stack.back() == 0) {
+        arity_stack.pop_back();
+        if (arity_stack.empty()) return assigned;  // reached end of loop
+        arity_stack.back()--;
+      }
+    }
+    return assigned;
+  }
+
+ private:
+  Zone* zone_;
+};
+
+
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+                                           const byte* start, const byte* end) {
+  LoopAssignmentAnalyzer analyzer(zone, env);
+  return analyzer.Analyze(start, end);
 }
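A hedged sketch of how a caller might consume the analysis, with the zone, environment, and loop bounds assumed to be set up elsewhere:

    // Hypothetical call site; BitVector is assumed to expose Contains().
    BitVector* assigned =
        AnalyzeLoopAssignmentForTesting(zone, env, loop_start, loop_end);
    if (assigned != nullptr && assigned->Contains(local_index)) {
      // The local is (re)assigned inside the loop body, so the loop
      // header needs a phi for it; untouched locals can skip the phi.
    }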
 
-
-int OpcodeArity(FunctionEnv* env, const byte* pc) {
-#define DECLARE_ARITY(name, ...)                          \
-  static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
-  static const int kArity_##name =                        \
-      static_cast<int>(arraysize(kTypes_##name) - 1);
-
-  FOREACH_SIGNATURE(DECLARE_ARITY);
-#undef DECLARE_ARITY
-
-  switch (static_cast<WasmOpcode>(*pc)) {
-    case kExprI8Const:
-    case kExprI32Const:
-    case kExprI64Const:
-    case kExprF64Const:
-    case kExprF32Const:
-    case kExprGetLocal:
-    case kExprLoadGlobal:
-    case kExprNop:
-    case kExprUnreachable:
-      return 0;
-
-    case kExprBr:
-    case kExprStoreGlobal:
-    case kExprSetLocal:
-      return 1;
-
-    case kExprIf:
-    case kExprBrIf:
-      return 2;
-    case kExprIfElse:
-    case kExprSelect:
-      return 3;
-    case kExprBlock:
-    case kExprLoop:
-      return *(pc + 1);
-
-    case kExprCallFunction: {
-      int index = *(pc + 1);
-      return static_cast<int>(
-          env->module->GetFunctionSignature(index)->parameter_count());
-    }
-    case kExprCallIndirect: {
-      int index = *(pc + 1);
-      return 1 + static_cast<int>(
-                     env->module->GetSignature(index)->parameter_count());
-    }
-    case kExprReturn:
-      return static_cast<int>(env->sig->return_count());
-    case kExprTableSwitch: {
-      uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc + 1);
-      return 1 + case_count;
-    }
-
-#define DECLARE_OPCODE_CASE(name, opcode, sig) \
-  case kExpr##name:                            \
-    return kArity_##sig;
-
-      FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
-      FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
-      FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
-      FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
-  }
-  UNREACHABLE();
-  return 0;
-}
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
index 5b95ad9..465baca 100644
--- a/src/wasm/ast-decoder.h
+++ b/src/wasm/ast-decoder.h
@@ -6,18 +6,181 @@
 #define V8_WASM_AST_DECODER_H_
 
 #include "src/signature.h"
+#include "src/wasm/decoder.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/wasm/wasm-result.h"
 
 namespace v8 {
 namespace internal {
 
+class BitVector;  // forward declaration
+
 namespace compiler {  // external declarations from compiler.
 class WasmGraphBuilder;
 }
 
 namespace wasm {
 
+// Helpers for decoding different kinds of operands which follow bytecodes.
+struct LocalIndexOperand {
+  uint32_t index;
+  LocalType type;
+  int length;
+
+  inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "local index");
+    type = kAstStmt;
+  }
+};
+
+struct ImmI8Operand {
+  int8_t value;
+  int length;
+  inline ImmI8Operand(Decoder* decoder, const byte* pc) {
+    value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
+    length = 1;
+  }
+};
+
+struct ImmI32Operand {
+  int32_t value;
+  int length;
+  inline ImmI32Operand(Decoder* decoder, const byte* pc) {
+    value = bit_cast<int32_t>(decoder->checked_read_u32(pc, 1, "immi32"));
+    length = 4;
+  }
+};
+
+struct ImmI64Operand {
+  int64_t value;
+  int length;
+  inline ImmI64Operand(Decoder* decoder, const byte* pc) {
+    value = bit_cast<int64_t>(decoder->checked_read_u64(pc, 1, "immi64"));
+    length = 8;
+  }
+};
+
+struct ImmF32Operand {
+  float value;
+  int length;
+  inline ImmF32Operand(Decoder* decoder, const byte* pc) {
+    value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
+    length = 4;
+  }
+};
+
+struct ImmF64Operand {
+  double value;
+  int length;
+  inline ImmF64Operand(Decoder* decoder, const byte* pc) {
+    value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
+    length = 8;
+  }
+};
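The float and double immediates are stored as raw little-endian bit patterns, so the decoder fetches the underlying word and reinterprets it with bit_cast instead of converting numerically. A self-contained stand-in for that reinterpretation (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Equivalent of bit_cast<double>(uint64_t): copy the bits, don't convert.
    double BitsToDouble(uint64_t bits) {
      double result;
      static_assert(sizeof(result) == sizeof(bits), "size mismatch");
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }
    // BitsToDouble(0x3FF0000000000000ULL) == 1.0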
+
+struct GlobalIndexOperand {
+  uint32_t index;
+  LocalType type;
+  MachineType machine_type;
+  int length;
+
+  inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+    type = kAstStmt;
+    machine_type = MachineType::None();
+  }
+};
+
+struct Block;
+struct BreakDepthOperand {
+  uint32_t depth;
+  Block* target;
+  int length;
+  inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
+    depth = decoder->checked_read_u8(pc, 1, "break depth");
+    length = 1;
+    target = nullptr;
+  }
+};
+
+struct BlockCountOperand {
+  uint32_t count;
+  int length;
+  inline BlockCountOperand(Decoder* decoder, const byte* pc) {
+    count = decoder->checked_read_u8(pc, 1, "block count");
+    length = 1;
+  }
+};
+
+struct SignatureIndexOperand {
+  uint32_t index;
+  FunctionSig* sig;
+  int length;
+  inline SignatureIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "signature index");
+    sig = nullptr;
+  }
+};
+
+struct FunctionIndexOperand {
+  uint32_t index;
+  FunctionSig* sig;
+  int length;
+  inline FunctionIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "function index");
+    sig = nullptr;
+  }
+};
+
+struct ImportIndexOperand {
+  uint32_t index;
+  FunctionSig* sig;
+  int length;
+  inline ImportIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "import index");
+    sig = nullptr;
+  }
+};
+
+struct TableSwitchOperand {
+  uint32_t case_count;
+  uint32_t table_count;
+  const byte* table;
+  int length;
+  inline TableSwitchOperand(Decoder* decoder, const byte* pc) {
+    case_count = decoder->checked_read_u16(pc, 1, "expected #cases");
+    table_count = decoder->checked_read_u16(pc, 3, "expected #entries");
+    length = 4 + table_count * 2;
+
+    if (decoder->check(pc, 5, table_count * 2, "expected <table entries>")) {
+      table = pc + 5;
+    } else {
+      table = nullptr;
+    }
+  }
+  inline uint16_t read_entry(Decoder* decoder, int i) {
+    DCHECK(i >= 0 && static_cast<uint32_t>(i) < table_count);
+    return table ? decoder->read_u16(table + i * sizeof(uint16_t)) : 0;
+  }
+};
+
+struct MemoryAccessOperand {
+  bool aligned;
+  uint32_t offset;
+  int length;
+  inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
+    byte bitfield = decoder->checked_read_u8(pc, 1, "memory access byte");
+    aligned = MemoryAccess::AlignmentField::decode(bitfield);
+    if (MemoryAccess::OffsetField::decode(bitfield)) {
+      offset = decoder->checked_read_u32v(pc, 2, &length, "memory offset");
+      length++;
+    } else {
+      offset = 0;
+      length = 1;
+    }
+  }
+};
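The single access byte is a bitfield: one flag records whether the access is aligned, another whether an explicit LEB128 offset follows (otherwise the offset defaults to zero and the operand is just that one byte). A hypothetical encoder mirroring this decode path, assuming the BitField helpers expose the usual encode():

    // Illustrative inverse of the decoding above; the real bit positions
    // come from MemoryAccess::AlignmentField / MemoryAccess::OffsetField.
    byte EncodeMemoryAccessByte(bool aligned, bool has_explicit_offset) {
      byte bitfield = 0;
      if (aligned) bitfield |= MemoryAccess::AlignmentField::encode(true);
      if (has_explicit_offset) {
        bitfield |= MemoryAccess::OffsetField::encode(true);
      }
      return bitfield;
    }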
+
 typedef compiler::WasmGraphBuilder TFBuilder;
 struct ModuleEnv;  // forward declaration of module interface.
 
@@ -26,56 +189,55 @@
 struct FunctionEnv {
   ModuleEnv* module;             // module environment
   FunctionSig* sig;              // signature of this function
-  uint32_t local_int32_count;    // number of int32 locals
-  uint32_t local_int64_count;    // number of int64 locals
-  uint32_t local_float32_count;  // number of float32 locals
-  uint32_t local_float64_count;  // number of float64 locals
+  uint32_t local_i32_count;      // number of int32 locals
+  uint32_t local_i64_count;      // number of int64 locals
+  uint32_t local_f32_count;      // number of float32 locals
+  uint32_t local_f64_count;      // number of float64 locals
   uint32_t total_locals;         // sum of parameters and all locals
 
-  bool IsValidLocal(uint32_t index) { return index < total_locals; }
   uint32_t GetLocalCount() { return total_locals; }
   LocalType GetLocalType(uint32_t index) {
     if (index < static_cast<uint32_t>(sig->parameter_count())) {
       return sig->GetParam(index);
     }
     index -= static_cast<uint32_t>(sig->parameter_count());
-    if (index < local_int32_count) return kAstI32;
-    index -= local_int32_count;
-    if (index < local_int64_count) return kAstI64;
-    index -= local_int64_count;
-    if (index < local_float32_count) return kAstF32;
-    index -= local_float32_count;
-    if (index < local_float64_count) return kAstF64;
+    if (index < local_i32_count) return kAstI32;
+    index -= local_i32_count;
+    if (index < local_i64_count) return kAstI64;
+    index -= local_i64_count;
+    if (index < local_f32_count) return kAstF32;
+    index -= local_f32_count;
+    if (index < local_f64_count) return kAstF64;
     return kAstStmt;
   }
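Locals are laid out as parameters first, then all i32 locals, then the i64, f32, and f64 groups; GetLocalType peels each group off the index in that order. A worked example with illustrative counts:

    // sig->parameter_count() == 2, local_i32_count == 3, local_f64_count == 1:
    //   index 0..1 -> the parameter types
    //   index 2..4 -> kAstI32
    //   index 5    -> kAstF64
    //   index 6+   -> kAstStmt (out of range)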
 
   void AddLocals(LocalType type, uint32_t count) {
     switch (type) {
       case kAstI32:
-        local_int32_count += count;
+        local_i32_count += count;
         break;
       case kAstI64:
-        local_int64_count += count;
+        local_i64_count += count;
         break;
       case kAstF32:
-        local_float32_count += count;
+        local_f32_count += count;
         break;
       case kAstF64:
-        local_float64_count += count;
+        local_f64_count += count;
         break;
       default:
         UNREACHABLE();
     }
     total_locals += count;
-    DCHECK(total_locals ==
-           (sig->parameter_count() + local_int32_count + local_int64_count +
-            local_float32_count + local_float64_count));
+    DCHECK_EQ(total_locals,
+              (sig->parameter_count() + local_i32_count + local_i64_count +
+               local_f32_count + local_f64_count));
   }
 
   void SumLocals() {
     total_locals = static_cast<uint32_t>(sig->parameter_count()) +
-                   local_int32_count + local_int64_count + local_float32_count +
-                   local_float64_count;
+                   local_i32_count + local_i64_count + local_f32_count +
+                   local_f64_count;
   }
 };
 
@@ -89,6 +251,8 @@
 TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
                         const byte* start, const byte* end);
 
+void PrintAst(FunctionEnv* env, const byte* start, const byte* end);
+
 inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
                                  const byte* end) {
   return VerifyWasmCode(env, nullptr, start, end);
@@ -104,11 +268,14 @@
 ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
                                                       int*, uint32_t*);
 
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+                                           const byte* start, const byte* end);
+
 // Computes the length of the opcode at the given address.
-int OpcodeLength(const byte* pc);
+int OpcodeLength(const byte* pc, const byte* end);
 
 // Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(FunctionEnv* env, const byte* pc);
+int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end);
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index 698919d..0e88eda 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -24,6 +24,12 @@
 #define TRACE(...)
 #endif
 
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
+#define UNALIGNED_ACCESS_OK 1
+#else
+#define UNALIGNED_ACCESS_OK 0
+#endif
+
 // A helper utility to decode bytes, integers, fields, varints, etc., from
 // a buffer of bytes.
 class Decoder {
@@ -32,107 +38,188 @@
       : start_(start),
         pc_(start),
         limit_(end),
+        end_(end),
         error_pc_(nullptr),
         error_pt_(nullptr) {}
 
   virtual ~Decoder() {}
 
+  inline bool check(const byte* base, int offset, int length, const char* msg) {
+    DCHECK_GE(base, start_);
+    if ((base + offset + length) > limit_) {
+      error(base, base + offset, msg);
+      return false;
+    }
+    return true;
+  }
+
+  // Reads a single 8-bit byte, reporting an error if out of bounds.
+  inline uint8_t checked_read_u8(const byte* base, int offset,
+                                 const char* msg = "expected 1 byte") {
+    return check(base, offset, 1, msg) ? base[offset] : 0;
+  }
+
+  // Reads a 16-bit word, reporting an error if out of bounds.
+  inline uint16_t checked_read_u16(const byte* base, int offset,
+                                   const char* msg = "expected 2 bytes") {
+    return check(base, offset, 2, msg) ? read_u16(base + offset) : 0;
+  }
+
+  // Reads a 32-bit word, reporting an error if out of bounds.
+  inline uint32_t checked_read_u32(const byte* base, int offset,
+                                   const char* msg = "expected 4 bytes") {
+    return check(base, offset, 4, msg) ? read_u32(base + offset) : 0;
+  }
+
+  // Reads a 64-bit word, reporting an error if out of bounds.
+  inline uint64_t checked_read_u64(const byte* base, int offset,
+                                   const char* msg = "expected 8 bytes") {
+    return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
+  }
+
+  uint32_t checked_read_u32v(const byte* base, int offset, int* length,
+                             const char* msg = "expected LEB128") {
+    if (!check(base, offset, 1, msg)) {
+      *length = 0;
+      return 0;
+    }
+
+    const ptrdiff_t kMaxDiff = 5;  // maximum 5 bytes.
+    const byte* ptr = base + offset;
+    const byte* end = ptr + kMaxDiff;
+    if (end > limit_) end = limit_;
+    int shift = 0;
+    byte b = 0;
+    uint32_t result = 0;
+    while (ptr < end) {
+      b = *ptr++;
+      result = result | ((b & 0x7F) << shift);
+      if ((b & 0x80) == 0) break;
+      shift += 7;
+    }
+    DCHECK_LE(ptr - (base + offset), kMaxDiff);
+    *length = static_cast<int>(ptr - (base + offset));
+    if (ptr == end && (b & 0x80)) {
+      error(base, ptr, msg);
+      return 0;
+    }
+    return result;
+  }
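Each LEB128 byte contributes seven payload bits, least-significant group first, and its high bit flags continuation. The classic worked example:

    // 0xE5 0x8E 0x26 decodes as:
    //   0xE5 -> 0x65 << 0  = 101      (high bit set: continue)
    //   0x8E -> 0x0E << 7  = 1792     (high bit set: continue)
    //   0x26 -> 0x26 << 14 = 622592   (high bit clear: stop)
    static_assert(101 + 1792 + 622592 == 624485, "unsigned LEB128 example");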
+
+  // Reads a single 16-bit unsigned integer (little endian).
+  inline uint16_t read_u16(const byte* ptr) {
+    DCHECK(ptr >= start_ && (ptr + 2) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+    return *reinterpret_cast<const uint16_t*>(ptr);
+#else
+    uint16_t b0 = ptr[0];
+    uint16_t b1 = ptr[1];
+    return (b1 << 8) | b0;
+#endif
+  }
+
+  // Reads a single 32-bit unsigned integer (little endian).
+  inline uint32_t read_u32(const byte* ptr) {
+    DCHECK(ptr >= start_ && (ptr + 4) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+    return *reinterpret_cast<const uint32_t*>(ptr);
+#else
+    uint32_t b0 = ptr[0];
+    uint32_t b1 = ptr[1];
+    uint32_t b2 = ptr[2];
+    uint32_t b3 = ptr[3];
+    return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+#endif
+  }
+
+  // Reads a single 64-bit unsigned integer (little endian).
+  inline uint64_t read_u64(const byte* ptr) {
+    DCHECK(ptr >= start_ && (ptr + 8) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+    return *reinterpret_cast<const uint64_t*>(ptr);
+#else
+    uint32_t b0 = ptr[0];
+    uint32_t b1 = ptr[1];
+    uint32_t b2 = ptr[2];
+    uint32_t b3 = ptr[3];
+    uint32_t low = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+    uint32_t b4 = ptr[4];
+    uint32_t b5 = ptr[5];
+    uint32_t b6 = ptr[6];
+    uint32_t b7 = ptr[7];
+    uint64_t high = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+    return (high << 32) | low;
+#endif
+  }
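On targets where unaligned loads are safe and the byte order already matches, these reads compile to a single load; elsewhere the bytes are reassembled explicitly, which is both endian-correct and avoids unaligned-access faults. A sanity check of the fallback arithmetic:

    // The buffer {0x78, 0x56, 0x34, 0x12} must read back as 0x12345678.
    static_assert(
        ((0x12u << 24) | (0x34u << 16) | (0x56u << 8) | 0x78u) == 0x12345678u,
        "little-endian reassembly");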
+
   // Reads an 8-bit unsigned integer (byte) and advances {pc_}.
-  uint8_t u8(const char* name = nullptr) {
+  uint8_t consume_u8(const char* name = nullptr) {
     TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
           name ? name : "uint8_t");
     if (checkAvailable(1)) {
       byte val = *(pc_++);
       TRACE("%02x = %d\n", val, val);
       return val;
-    } else {
-      error("expected 1 byte, but fell off end");
-      return traceOffEnd<uint8_t>();
     }
+    return traceOffEnd<uint8_t>();
   }
 
   // Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
-  uint16_t u16(const char* name = nullptr) {
+  uint16_t consume_u16(const char* name = nullptr) {
     TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
           name ? name : "uint16_t");
     if (checkAvailable(2)) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
-      byte b0 = pc_[0];
-      byte b1 = pc_[1];
-#else
-      byte b1 = pc_[0];
-      byte b0 = pc_[1];
-#endif
-      uint16_t val = static_cast<uint16_t>(b1 << 8) | b0;
+      uint16_t val = read_u16(pc_);
       TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
       pc_ += 2;
       return val;
-    } else {
-      error("expected 2 bytes, but fell off end");
-      return traceOffEnd<uint16_t>();
     }
+    return traceOffEnd<uint16_t>();
   }
 
   // Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
-  uint32_t u32(const char* name = nullptr) {
+  uint32_t consume_u32(const char* name = nullptr) {
     TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
           name ? name : "uint32_t");
     if (checkAvailable(4)) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
-      byte b0 = pc_[0];
-      byte b1 = pc_[1];
-      byte b2 = pc_[2];
-      byte b3 = pc_[3];
-#else
-      byte b3 = pc_[0];
-      byte b2 = pc_[1];
-      byte b1 = pc_[2];
-      byte b0 = pc_[3];
-#endif
-      uint32_t val = static_cast<uint32_t>(b3 << 24) |
-                     static_cast<uint32_t>(b2 << 16) |
-                     static_cast<uint32_t>(b1 << 8) | b0;
+      uint32_t val = read_u32(pc_);
       TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
       pc_ += 4;
       return val;
-    } else {
-      error("expected 4 bytes, but fell off end");
-      return traceOffEnd<uint32_t>();
     }
+    return traceOffEnd<uint32_t>();
   }
 
   // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
-  uint32_t u32v(int* length, const char* name = nullptr) {
+  uint32_t consume_u32v(int* length, const char* name = nullptr) {
     TRACE("  +%d  %-20s: ", static_cast<int>(pc_ - start_),
           name ? name : "varint");
 
-    if (!checkAvailable(1)) {
-      error("expected at least 1 byte, but fell off end");
-      return traceOffEnd<uint32_t>();
-    }
+    if (checkAvailable(1)) {
+      const byte* pos = pc_;
+      const byte* end = pc_ + 5;
+      if (end > limit_) end = limit_;
 
-    const byte* pos = pc_;
-    const byte* end = pc_ + 5;
-    if (end > limit_) end = limit_;
+      uint32_t result = 0;
+      int shift = 0;
+      byte b = 0;
+      while (pc_ < end) {
+        b = *pc_++;
+        TRACE("%02x ", b);
+        result = result | ((b & 0x7F) << shift);
+        if ((b & 0x80) == 0) break;
+        shift += 7;
+      }
 
-    uint32_t result = 0;
-    int shift = 0;
-    byte b = 0;
-    while (pc_ < end) {
-      b = *pc_++;
-      TRACE("%02x ", b);
-      result = result | ((b & 0x7F) << shift);
-      if ((b & 0x80) == 0) break;
-      shift += 7;
+      *length = static_cast<int>(pc_ - pos);
+      if (pc_ == end && (b & 0x80)) {
+        error(pc_ - 1, "varint too large");
+      } else {
+        TRACE("= %u\n", result);
+      }
+      return result;
     }
-
-    *length = static_cast<int>(pc_ - pos);
-    if (pc_ == end && (b & 0x80)) {
-      error(pc_ - 1, "varint too large");
-    } else {
-      TRACE("= %u\n", result);
-    }
-    return result;
+    return traceOffEnd<uint32_t>();
   }
 
   // Check that at least {size} bytes exist between {pc_} and {limit_}.
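
The varint format consumed by consume_u32v is unsigned LEB128: each byte carries 7 payload bits, the high bit marks continuation, and a 32-bit value needs at most 5 bytes. A minimal self-contained sketch of the same decode loop (hypothetical helper; the dangling-continuation-bit error reported above is omitted here):

    #include <cstdint>

    // Unsigned LEB128: 7 payload bits per byte, high bit = "more follows".
    // E.g. {0xE5, 0x8E, 0x26} decodes to 624485.
    uint32_t DecodeU32LEB(const uint8_t* p, const uint8_t* end, int* length) {
      uint32_t result = 0;
      int shift = 0;
      const uint8_t* pos = p;
      while (p < end && shift < 35) {  // at most 5 bytes for 32 bits
        uint8_t b = *p++;
        result |= static_cast<uint32_t>(b & 0x7F) << shift;
        shift += 7;
        if ((b & 0x80) == 0) break;
      }
      *length = static_cast<int>(p - pos);
      return result;
    }
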
@@ -145,6 +232,12 @@
     }
   }
 
+  bool RangeOk(const byte* pc, int length) {
+    if (pc < start_ || pc >= limit_) return false;
+    if ((pc + length) > limit_) return false;
+    return true;
+  }
+
   void error(const char* msg) { error(pc_, nullptr, msg); }
 
   void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
@@ -208,6 +301,7 @@
     start_ = start;
     pc_ = start;
     limit_ = end;
+    end_ = end;
     error_pc_ = nullptr;
     error_pt_ = nullptr;
     error_msg_.Reset(nullptr);
@@ -220,6 +314,7 @@
   const byte* start_;
   const byte* pc_;
   const byte* limit_;
+  const byte* end_;
   const byte* error_pc_;
   const byte* error_pt_;
   base::SmartArrayPointer<char> error_msg_;
diff --git a/src/wasm/encoder.cc b/src/wasm/encoder.cc
index d8d3633..d80a275 100644
--- a/src/wasm/encoder.cc
+++ b/src/wasm/encoder.cc
@@ -30,13 +30,13 @@
 
 
 void EmitUint16(byte** b, uint16_t x) {
-  Memory::uint16_at(*b) = x;
+  WriteUnalignedUInt16(*b, x);
   *b += 2;
 }
 
 
 void EmitUint32(byte** b, uint32_t x) {
-  Memory::uint32_at(*b) = x;
+  WriteUnalignedUInt32(*b, x);
   *b += 4;
 }
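
The switch from Memory::uint16_at/uint32_at to WriteUnalignedUInt16/32 matters because the output pointer need not be naturally aligned. A sketch of what such a helper amounts to, assuming a portable memcpy-based form (the V8 helpers may differ in detail):

    #include <cstdint>
    #include <cstring>

    // Portable unaligned store: memcpy is valid at any alignment and
    // compiles to a single move on common targets.
    inline void WriteUnalignedU32(uint8_t* p, uint32_t x) {
      std::memcpy(p, &x, sizeof(x));
    }
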
 
@@ -121,12 +121,6 @@
 }
 
 
-void WasmFunctionBuilder::EmitWithLocal(WasmOpcode opcode) {
-  body_.push_back(static_cast<byte>(opcode));
-  local_indices_.push_back(static_cast<uint32_t>(body_.size()) - 1);
-}
-
-
 uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
   body_.push_back(immediate);
   return static_cast<uint32_t>(body_.size()) - 1;
@@ -202,44 +196,44 @@
 void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
                                     uint16_t* var_index) const {
   uint16_t param = 0;
-  uint16_t int32 = 0;
-  uint16_t int64 = 0;
-  uint16_t float32 = 0;
-  uint16_t float64 = 0;
+  uint16_t i32 = 0;
+  uint16_t i64 = 0;
+  uint16_t f32 = 0;
+  uint16_t f64 = 0;
   for (size_t i = 0; i < locals_.size(); i++) {
     if (locals_.at(i).param_) {
       param++;
     } else if (locals_.at(i).type_ == kAstI32) {
-      int32++;
+      i32++;
     } else if (locals_.at(i).type_ == kAstI64) {
-      int64++;
+      i64++;
     } else if (locals_.at(i).type_ == kAstF32) {
-      float32++;
+      f32++;
     } else if (locals_.at(i).type_ == kAstF64) {
-      float64++;
+      f64++;
     }
   }
-  e->local_int32_count_ = int32;
-  e->local_int64_count_ = int64;
-  e->local_float32_count_ = float32;
-  e->local_float64_count_ = float64;
-  float64 = param + int32 + int64 + float32;
-  float32 = param + int32 + int64;
-  int64 = param + int32;
-  int32 = param;
+  e->local_i32_count_ = i32;
+  e->local_i64_count_ = i64;
+  e->local_f32_count_ = f32;
+  e->local_f64_count_ = f64;
+  f64 = param + i32 + i64 + f32;
+  f32 = param + i32 + i64;
+  i64 = param + i32;
+  i32 = param;
   param = 0;
   for (size_t i = 0; i < locals_.size(); i++) {
     if (locals_.at(i).param_) {
       e->params_.push_back(locals_.at(i).type_);
       var_index[i] = param++;
     } else if (locals_.at(i).type_ == kAstI32) {
-      var_index[i] = int32++;
+      var_index[i] = i32++;
     } else if (locals_.at(i).type_ == kAstI64) {
-      var_index[i] = int64++;
+      var_index[i] = i64++;
     } else if (locals_.at(i).type_ == kAstF32) {
-      var_index[i] = float32++;
+      var_index[i] = f32++;
     } else if (locals_.at(i).type_ == kAstF64) {
-      var_index[i] = float64++;
+      var_index[i] = f64++;
     }
   }
 }
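
The renamed counters implement a fixed local-index layout: parameters first, then all i32 locals, then i64, f32, f64. The reseeded counters in the second loop are simply each group's running base offset. A compressed, standalone sketch of the same prefix-sum idea (hypothetical helper):

    #include <cstdint>

    // Starting index of each group when locals are laid out as
    // [params | i32s | i64s | f32s | f64s].
    struct LocalBases {
      uint16_t i32, i64, f32, f64;
    };

    LocalBases ComputeBases(uint16_t params, uint16_t i32s, uint16_t i64s,
                            uint16_t f32s) {
      LocalBases b;
      b.i32 = params;
      b.i64 = static_cast<uint16_t>(b.i32 + i32s);
      b.f32 = static_cast<uint16_t>(b.i64 + i64s);
      b.f64 = static_cast<uint16_t>(b.f32 + f32s);
      return b;
    }
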
@@ -269,7 +263,7 @@
 
 
 uint32_t WasmFunctionEncoder::NameSize() const {
-  return exported_ ? static_cast<uint32_t>(name_.size()) : 0;
+  return HasName() ? static_cast<uint32_t>(name_.size()) : 0;
 }
 
 
@@ -291,10 +285,10 @@
   }
 
   if (HasLocals()) {
-    EmitUint16(header, local_int32_count_);
-    EmitUint16(header, local_int64_count_);
-    EmitUint16(header, local_float32_count_);
-    EmitUint16(header, local_float64_count_);
+    EmitUint16(header, local_i32_count_);
+    EmitUint16(header, local_i64_count_);
+    EmitUint16(header, local_f32_count_);
+    EmitUint16(header, local_f64_count_);
   }
 
   if (!external_) {
@@ -370,21 +364,21 @@
 }
 
 
-int WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
-                                                       FunctionSig* b) const {
-  if (a->return_count() < b->return_count()) return -1;
-  if (a->return_count() > b->return_count()) return 1;
-  if (a->parameter_count() < b->parameter_count()) return -1;
-  if (a->parameter_count() > b->parameter_count()) return 1;
+bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
+                                                        FunctionSig* b) const {
+  if (a->return_count() < b->return_count()) return true;
+  if (a->return_count() > b->return_count()) return false;
+  if (a->parameter_count() < b->parameter_count()) return true;
+  if (a->parameter_count() > b->parameter_count()) return false;
   for (size_t r = 0; r < a->return_count(); r++) {
-    if (a->GetReturn(r) < b->GetReturn(r)) return -1;
-    if (a->GetReturn(r) > b->GetReturn(r)) return 1;
+    if (a->GetReturn(r) < b->GetReturn(r)) return true;
+    if (a->GetReturn(r) > b->GetReturn(r)) return false;
   }
   for (size_t p = 0; p < a->parameter_count(); p++) {
-    if (a->GetParam(p) < b->GetParam(p)) return -1;
-    if (a->GetParam(p) > b->GetParam(p)) return 1;
+    if (a->GetParam(p) < b->GetParam(p)) return true;
+    if (a->GetParam(p) > b->GetParam(p)) return false;
   }
-  return 0;
+  return false;
 }
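
The comparator's change from a three-way int to a bool is what ZoneMap (a zone-allocated std::map) requires: std::map expects a less-than predicate defining a strict weak ordering, not a memcmp-style result. The lexicographic pattern generalizes; a hedged sketch with plain vectors:

    #include <vector>

    // Lexicographic "less than" over type lists, mirroring
    // CompareFunctionSigs: compare counts first, then elements.
    bool SigLess(const std::vector<int>& a, const std::vector<int>& b) {
      if (a.size() != b.size()) return a.size() < b.size();
      for (size_t i = 0; i < a.size(); i++) {
        if (a[i] != b[i]) return a[i] < b[i];
      }
      return false;  // equal => not less (strict weak ordering)
    }
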
 
 
diff --git a/src/wasm/encoder.h b/src/wasm/encoder.h
index f0fabe9..7b651bf 100644
--- a/src/wasm/encoder.h
+++ b/src/wasm/encoder.h
@@ -33,21 +33,21 @@
   friend class WasmFunctionBuilder;
   uint16_t signature_index_;
   ZoneVector<LocalType> params_;
-  uint16_t local_int32_count_;
-  uint16_t local_int64_count_;
-  uint16_t local_float32_count_;
-  uint16_t local_float64_count_;
+  uint16_t local_i32_count_;
+  uint16_t local_i64_count_;
+  uint16_t local_f32_count_;
+  uint16_t local_f64_count_;
   bool exported_;
   bool external_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
 
   bool HasLocals() const {
-    return (local_int32_count_ + local_int64_count_ + local_float32_count_ +
-            local_float64_count_) > 0;
+    return (local_i32_count_ + local_i64_count_ + local_f32_count_ +
+            local_f64_count_) > 0;
   }
 
-  bool HasName() const { return exported_ && name_.size() > 0; }
+  bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
 };
 
 class WasmFunctionBuilder : public ZoneObject {
@@ -60,7 +60,6 @@
                 const uint32_t* local_indices, uint32_t indices_size);
   void Emit(WasmOpcode opcode);
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
-  void EmitWithLocal(WasmOpcode opcode);
   uint32_t EmitEditableImmediate(const byte immediate);
   void EditImmediate(uint32_t offset, const byte immediate);
   void Exported(uint8_t flag);
@@ -134,12 +133,12 @@
   void AddIndirectFunction(uint16_t index);
   WasmModuleWriter* Build(Zone* zone);
 
- private:
   struct CompareFunctionSigs {
-    int operator()(FunctionSig* a, FunctionSig* b) const;
+    bool operator()(FunctionSig* a, FunctionSig* b) const;
   };
   typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
 
+ private:
   Zone* zone_;
   ZoneVector<FunctionSig*> signatures_;
   ZoneVector<WasmFunctionBuilder*> functions_;
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index 24f3982..62b000d 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -54,6 +54,7 @@
     module->functions = new std::vector<WasmFunction>();
     module->data_segments = new std::vector<WasmDataSegment>();
     module->function_table = new std::vector<uint16_t>();
+    module->import_table = new std::vector<WasmImport>();
 
     bool sections[kMaxModuleSectionCode];
     memset(sections, 0, sizeof(sections));
@@ -62,7 +63,7 @@
     while (pc_ < limit_) {
       TRACE("DecodeSection\n");
       WasmSectionDeclCode section =
-          static_cast<WasmSectionDeclCode>(u8("section"));
+          static_cast<WasmSectionDeclCode>(consume_u8("section"));
       // Each section should appear at most once.
       if (section < kMaxModuleSectionCode) {
         CheckForPreviousSection(sections, section, false);
@@ -75,20 +76,20 @@
           limit_ = pc_;
           break;
         case kDeclMemory:
-          module->min_mem_size_log2 = u8("min memory");
-          module->max_mem_size_log2 = u8("max memory");
-          module->mem_export = u8("export memory") != 0;
+          module->min_mem_size_log2 = consume_u8("min memory");
+          module->max_mem_size_log2 = consume_u8("max memory");
+          module->mem_export = consume_u8("export memory") != 0;
           break;
         case kDeclSignatures: {
           int length;
-          uint32_t signatures_count = u32v(&length, "signatures count");
+          uint32_t signatures_count = consume_u32v(&length, "signatures count");
           module->signatures->reserve(SafeReserve(signatures_count));
           // Decode signatures.
           for (uint32_t i = 0; i < signatures_count; i++) {
             if (failed()) break;
             TRACE("DecodeSignature[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            FunctionSig* s = sig();  // read function sig.
+            FunctionSig* s = consume_sig();  // read function sig.
             module->signatures->push_back(s);
           }
           break;
@@ -97,15 +98,12 @@
           // Functions require a signature table first.
           CheckForPreviousSection(sections, kDeclSignatures, true);
           int length;
-          uint32_t functions_count = u32v(&length, "functions count");
+          uint32_t functions_count = consume_u32v(&length, "functions count");
           module->functions->reserve(SafeReserve(functions_count));
           // Set up module environment for verification.
           ModuleEnv menv;
           menv.module = module;
-          menv.globals_area = 0;
-          menv.mem_start = 0;
-          menv.mem_end = 0;
-          menv.function_code = nullptr;
+          menv.instance = nullptr;
           menv.asm_js = asm_js_;
           // Decode functions.
           for (uint32_t i = 0; i < functions_count; i++) {
@@ -114,7 +112,7 @@
                   static_cast<int>(pc_ - start_));
 
             module->functions->push_back(
-                {nullptr, 0, 0, 0, 0, 0, 0, false, false});
+                {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
             WasmFunction* function = &module->functions->back();
             DecodeFunctionInModule(module, function, false);
           }
@@ -133,7 +131,7 @@
         }
         case kDeclGlobals: {
           int length;
-          uint32_t globals_count = u32v(&length, "globals count");
+          uint32_t globals_count = consume_u32v(&length, "globals count");
           module->globals->reserve(SafeReserve(globals_count));
           // Decode globals.
           for (uint32_t i = 0; i < globals_count; i++) {
@@ -148,7 +146,8 @@
         }
         case kDeclDataSegments: {
           int length;
-          uint32_t data_segments_count = u32v(&length, "data segments count");
+          uint32_t data_segments_count =
+              consume_u32v(&length, "data segments count");
           module->data_segments->reserve(SafeReserve(data_segments_count));
           // Decode data segments.
           for (uint32_t i = 0; i < data_segments_count; i++) {
@@ -157,7 +156,7 @@
                   static_cast<int>(pc_ - start_));
             module->data_segments->push_back({0, 0, 0});
             WasmDataSegment* segment = &module->data_segments->back();
-            DecodeDataSegmentInModule(segment);
+            DecodeDataSegmentInModule(module, segment);
           }
           break;
         }
@@ -165,14 +164,15 @@
           // An indirect function table requires functions first.
           CheckForPreviousSection(sections, kDeclFunctions, true);
           int length;
-          uint32_t function_table_count = u32v(&length, "function table count");
+          uint32_t function_table_count =
+              consume_u32v(&length, "function table count");
           module->function_table->reserve(SafeReserve(function_table_count));
           // Decode function table.
           for (uint32_t i = 0; i < function_table_count; i++) {
             if (failed()) break;
             TRACE("DecodeFunctionTable[%d] module+%d\n", i,
                   static_cast<int>(pc_ - start_));
-            uint16_t index = u16();
+            uint16_t index = consume_u16();
             if (index >= module->functions->size()) {
               error(pc_ - 2, "invalid function index");
               break;
@@ -181,13 +181,66 @@
           }
           break;
         }
+        case kDeclStartFunction: {
+          // Declares a start function for a module.
+          CheckForPreviousSection(sections, kDeclFunctions, true);
+          if (module->start_function_index >= 0) {
+            error("start function already declared");
+            break;
+          }
+          int length;
+          const byte* before = pc_;
+          uint32_t index = consume_u32v(&length, "start function index");
+          if (index >= module->functions->size()) {
+            error(before, "invalid start function index");
+            break;
+          }
+          module->start_function_index = static_cast<int>(index);
+          FunctionSig* sig =
+              module->signatures->at(module->functions->at(index).sig_index);
+          if (sig->parameter_count() > 0) {
+            error(before, "invalid start function: non-zero parameter count");
+            break;
+          }
+          break;
+        }
+        case kDeclImportTable: {
+          // Declares an import table.
+          CheckForPreviousSection(sections, kDeclSignatures, true);
+          int length;
+          uint32_t import_table_count =
+              consume_u32v(&length, "import table count");
+          module->import_table->reserve(SafeReserve(import_table_count));
+          // Decode import table.
+          for (uint32_t i = 0; i < import_table_count; i++) {
+            if (failed()) break;
+            TRACE("DecodeImportTable[%d] module+%d\n", i,
+                  static_cast<int>(pc_ - start_));
+
+            module->import_table->push_back({nullptr, 0, 0});
+            WasmImport* import = &module->import_table->back();
+
+            const byte* sigpos = pc_;
+            import->sig_index = consume_u16("signature index");
+
+            if (import->sig_index >= module->signatures->size()) {
+              error(sigpos, "invalid signature index");
+            } else {
+              import->sig = module->signatures->at(import->sig_index);
+            }
+            import->module_name_offset = consume_string("import module name");
+            import->function_name_offset =
+                consume_string("import function name");
+          }
+          break;
+        }
         case kDeclWLL: {
           // Reserved for experimentation by the Web Low-level Language project
           // which is augmenting the binary encoding with source code meta
           // information. This section does not affect the semantics of the code
           // and can be ignored by the runtime. https://github.com/JSStats/wll
           int length = 0;
-          uint32_t section_size = u32v(&length, "section size");
+          uint32_t section_size = consume_u32v(&length, "section size");
           if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
             error(pc_ - length, "invalid section size");
             break;
@@ -249,14 +302,14 @@
   FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
                                       WasmFunction* function) {
     pc_ = start_;
-    function->sig = sig();                       // read signature
+    function->sig = consume_sig();                  // read signature
     function->name_offset = 0;                   // ---- name
     function->code_start_offset = off(pc_ + 8);  // ---- code start
     function->code_end_offset = off(limit_);     // ---- code end
-    function->local_int32_count = u16();         // read u16
-    function->local_int64_count = u16();         // read u16
-    function->local_float32_count = u16();       // read u16
-    function->local_float64_count = u16();       // read u16
+    function->local_i32_count = consume_u16();      // read u16
+    function->local_i64_count = consume_u16();      // read u16
+    function->local_f32_count = consume_u16();      // read u16
+    function->local_f64_count = consume_u16();      // read u16
     function->exported = false;                  // ---- exported
     function->external = false;                  // ---- external
 
@@ -271,7 +324,7 @@
   // Decodes a single function signature at {start}.
   FunctionSig* DecodeFunctionSignature(const byte* start) {
     pc_ = start;
-    FunctionSig* result = sig();
+    FunctionSig* result = consume_sig();
     return ok() ? result : nullptr;
   }
 
@@ -284,19 +337,19 @@
 
   // Decodes a single global entry inside a module starting at {pc_}.
   void DecodeGlobalInModule(WasmGlobal* global) {
-    global->name_offset = string("global name");
+    global->name_offset = consume_string("global name");
     global->type = mem_type();
     global->offset = 0;
-    global->exported = u8("exported") != 0;
+    global->exported = consume_u8("exported") != 0;
   }
 
   // Decodes a single function entry inside a module starting at {pc_}.
   void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
                               bool verify_body = true) {
-    byte decl_bits = u8("function decl");
+    byte decl_bits = consume_u8("function decl");
 
     const byte* sigpos = pc_;
-    function->sig_index = u16("signature index");
+    function->sig_index = consume_u16("signature index");
 
     if (function->sig_index >= module->signatures->size()) {
       return error(sigpos, "invalid signature index");
@@ -313,7 +366,7 @@
           (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
 
     if (decl_bits & kDeclFunctionName) {
-      function->name_offset = string("function name");
+      function->name_offset = consume_string("function name");
     }
 
     function->exported = decl_bits & kDeclFunctionExport;
@@ -325,13 +378,13 @@
     }
 
     if (decl_bits & kDeclFunctionLocals) {
-      function->local_int32_count = u16("int32 count");
-      function->local_int64_count = u16("int64 count");
-      function->local_float32_count = u16("float32 count");
-      function->local_float64_count = u16("float64 count");
+      function->local_i32_count = consume_u16("i32 count");
+      function->local_i64_count = consume_u16("i64 count");
+      function->local_f32_count = consume_u16("f32 count");
+      function->local_f64_count = consume_u16("f64 count");
     }
 
-    uint16_t size = u16("body size");
+    uint16_t size = consume_u16("body size");
     if (ok()) {
       if ((pc_ + size) > limit_) {
         return error(pc_, limit_,
@@ -345,35 +398,51 @@
     }
   }
 
+  bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
+    if (offset > limit) return false;
+    if ((offset + size) < offset) return false;  // overflow
+    return (offset + size) <= limit;
+  }
+
   // Decodes a single data segment entry inside a module starting at {pc_}.
-  void DecodeDataSegmentInModule(WasmDataSegment* segment) {
-    segment->dest_addr =
-        u32("destination");  // TODO(titzer): check it's within the memory size.
-    segment->source_offset = offset("source offset");
-    segment->source_size =
-        u32("source size");  // TODO(titzer): check the size is reasonable.
-    segment->init = u8("init");
+  void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
+    segment->dest_addr = consume_u32("destination");
+    segment->source_offset = consume_offset("source offset");
+    segment->source_size = consume_u32("source size");
+    segment->init = consume_u8("init");
+
+    // Validate the data is in the module.
+    uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
+    if (!IsWithinLimit(module_limit, segment->source_offset,
+                       segment->source_size)) {
+      error(pc_ - sizeof(uint32_t), "segment out of bounds of module");
+    }
+
+    // Validate that the segment will fit into the (minimum) memory.
+    uint32_t memory_limit =
+        1 << (module ? module->min_mem_size_log2 : WasmModule::kMaxMemSize);
+    if (!IsWithinLimit(memory_limit, segment->dest_addr,
+                       segment->source_size)) {
+      error(pc_ - sizeof(uint32_t), "segment out of bounds of memory");
+    }
   }
 
   // Verifies the body (code) of a given function.
   void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
                           WasmFunction* function) {
     if (FLAG_trace_wasm_decode_time) {
-      // TODO(titzer): clean me up a bit.
       OFStream os(stdout);
-      os << "Verifying WASM function:";
-      if (function->name_offset > 0) {
-        os << menv->module->GetName(function->name_offset);
-      }
+      os << "Verifying WASM function " << WasmFunctionName(function, menv)
+         << std::endl;
-      os << std::endl;
     }
     FunctionEnv fenv;
     fenv.module = menv;
     fenv.sig = function->sig;
-    fenv.local_int32_count = function->local_int32_count;
-    fenv.local_int64_count = function->local_int64_count;
-    fenv.local_float32_count = function->local_float32_count;
-    fenv.local_float64_count = function->local_float64_count;
+    fenv.local_i32_count = function->local_i32_count;
+    fenv.local_i64_count = function->local_i64_count;
+    fenv.local_f32_count = function->local_f32_count;
+    fenv.local_f64_count = function->local_f64_count;
     fenv.SumLocals();
 
     TreeResult result =
@@ -382,8 +451,7 @@
     if (result.failed()) {
       // Wrap the error message from the function decoder.
       std::ostringstream str;
-      str << "in function #" << func_num << ": ";
-      // TODO(titzer): add function name for the user?
+      str << "in function " << WasmFunctionName(function, menv) << ": ";
       str << result;
       std::string strval = str.str();
       const char* raw = strval.c_str();
@@ -400,8 +468,8 @@
 
   // Reads a single 32-bit unsigned integer interpreted as an offset, checks
   // that the offset is within bounds, and advances {pc_}.
-  uint32_t offset(const char* name = nullptr) {
-    uint32_t offset = u32(name ? name : "offset");
+  uint32_t consume_offset(const char* name = nullptr) {
+    uint32_t offset = consume_u32(name ? name : "offset");
     if (offset > static_cast<uint32_t>(limit_ - start_)) {
       error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
     }
@@ -410,13 +478,14 @@
 
   // Reads a single 32-bit unsigned integer interpreted as an offset into the
   // module data, pointing at a string (not yet validated; see TODO below),
   // and advances {pc_}.
-  uint32_t string(const char* name = nullptr) {
-    return offset(name ? name : "string");  // TODO(titzer): validate string
+  uint32_t consume_string(const char* name = nullptr) {
+    // TODO(titzer): validate string
+    return consume_offset(name ? name : "string");
   }
 
   // Reads a single 8-bit integer, interpreting it as a local type.
-  LocalType local_type() {
-    byte val = u8("local type");
+  LocalType consume_local_type() {
+    byte val = consume_u8("local type");
     LocalTypeCode t = static_cast<LocalTypeCode>(val);
     switch (t) {
       case kLocalVoid:
@@ -437,7 +506,7 @@
 
   // Reads a single 8-bit integer, interpreting it as a memory type.
   MachineType mem_type() {
-    byte val = u8("memory type");
+    byte val = consume_u8("memory type");
     MemTypeCode t = static_cast<MemTypeCode>(val);
     switch (t) {
       case kMemI8:
@@ -467,14 +536,14 @@
   }
 
   // Parses an inline function signature.
-  FunctionSig* sig() {
-    byte count = u8("param count");
-    LocalType ret = local_type();
+  FunctionSig* consume_sig() {
+    byte count = consume_u8("param count");
+    LocalType ret = consume_local_type();
     FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
     if (ret != kAstStmt) builder.AddReturn(ret);
 
     for (int i = 0; i < count; i++) {
-      LocalType param = local_type();
+      LocalType param = consume_local_type();
       if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
       builder.AddParam(param);
     }
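
Per consume_sig, an inline signature is encoded as a parameter count byte, a return type byte, then one type byte per parameter. For example (numeric values hypothetical, for illustration only; the real codes come from LocalTypeCode):

    #include <cstdint>

    // Hypothetical values; see LocalTypeCode for the real definitions.
    enum LocalTypeCode : uint8_t { kLocalVoid = 0, kLocalI32 = 1 };

    // Encoding of a signature "i32 (i32, i32)" as consume_sig reads it:
    const uint8_t kSigI32BinOp[] = {
        2,          // parameter count
        kLocalI32,  // return type (kLocalVoid would mean no return value)
        kLocalI32,  // parameter 0
        kLocalI32,  // parameter 1
    };
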
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 80d8bdb..62a2676 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -37,6 +37,7 @@
 
 RawBuffer GetRawBufferArgument(
     ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
+  // TODO(titzer): allow typed array views.
   if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
     thrower.Error("Argument 0 must be an array buffer");
     return {nullptr, nullptr};
@@ -44,8 +45,6 @@
   Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
   ArrayBuffer::Contents contents = buffer->GetContents();
 
-  // TODO(titzer): allow offsets into buffers, views, etc.
-
   const byte* start = reinterpret_cast<const byte*>(contents.Data());
   const byte* end = start + contents.ByteLength();
 
@@ -100,33 +99,8 @@
   if (result.val) delete result.val;
 }
 
-
-void CompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.compileRun()");
-
-  RawBuffer buffer = GetRawBufferArgument(thrower, args);
-  if (thrower.error()) return;
-
-  // Decode and pre-verify the functions before compiling and running.
-  i::Zone zone;
-  internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, buffer.start, buffer.end, true, false);
-
-  if (result.failed()) {
-    thrower.Failed("", result);
-  } else {
-    // Success. Compile and run!
-    int32_t retval = i::wasm::CompileAndRunWasmModule(isolate, result.val);
-    args.GetReturnValue().Set(retval);
-  }
-
-  if (result.val) delete result.val;
-}
-
-
-v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
+v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(
+    i::ParseInfo* info, i::Handle<i::Object> foreign, ErrorThrower* thrower) {
   info->set_global();
   info->set_lazy(false);
   info->set_allow_lazy_parsing(false);
@@ -141,105 +115,34 @@
 
   v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
                                info->literal());
+  if (i::FLAG_enable_simd_asmjs) {
+    typer.set_allow_simd(true);
+  }
   if (!typer.Validate()) {
+    thrower->Error("Asm.js validation failed: %s", typer.error_message());
     return nullptr;
   }
 
   auto module = v8::internal::wasm::AsmWasmBuilder(
-                    info->isolate(), info->zone(), info->literal())
+                    info->isolate(), info->zone(), info->literal(), foreign)
                     .Run();
+
+  if (i::FLAG_dump_asmjs_wasm) {
+    FILE* wasm_file = fopen(i::FLAG_asmjs_wasm_dumpfile, "wb");
+    if (wasm_file) {
+      fwrite(module->Begin(), module->End() - module->Begin(), 1, wasm_file);
+      fclose(wasm_file);
+    }
+  }
+
   return module;
 }
 
 
-void AsmCompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
+void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
+                             const byte* start, const byte* end,
+                             ErrorThrower* thrower, bool must_decode) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.asmCompileRun()");
-
-  if (args.Length() != 1) {
-    thrower.Error("Invalid argument count");
-    return;
-  }
-  if (!args[0]->IsString()) {
-    thrower.Error("Invalid argument count");
-    return;
-  }
-
-  i::Factory* factory = isolate->factory();
-  i::Zone zone;
-  Local<String> source = Local<String>::Cast(args[0]);
-  i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
-  i::ParseInfo info(&zone, script);
-
-  auto module = TranslateAsmModule(&info);
-  if (module == nullptr) {
-    thrower.Error("Asm.js validation failed");
-    return;
-  }
-
-  int32_t result = v8::internal::wasm::CompileAndRunWasmModule(
-      isolate, module->Begin(), module->End(), true);
-  args.GetReturnValue().Set(result);
-}
-
-
-// TODO(aseemgarg): deal with arraybuffer and foreign functions
-void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
-
-  if (args.Length() != 1) {
-    thrower.Error("Invalid argument count");
-    return;
-  }
-  if (!args[0]->IsString()) {
-    thrower.Error("Invalid argument count");
-    return;
-  }
-
-  i::Factory* factory = isolate->factory();
-  i::Zone zone;
-  Local<String> source = Local<String>::Cast(args[0]);
-  i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
-  i::ParseInfo info(&zone, script);
-
-  auto module = TranslateAsmModule(&info);
-  if (module == nullptr) {
-    thrower.Error("Asm.js validation failed");
-    return;
-  }
-
-  i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
-  internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, module->Begin(), module->End(), false, false);
-
-  if (result.failed()) {
-    thrower.Failed("", result);
-  } else {
-    // Success. Instantiate the module and return the object.
-    i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
-
-    i::MaybeHandle<i::JSObject> object =
-        result.val->Instantiate(isolate, ffi, memory);
-
-    if (!object.is_null()) {
-      args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
-    }
-  }
-
-  if (result.val) delete result.val;
-}
-
-
-void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  HandleScope scope(args.GetIsolate());
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-  ErrorThrower thrower(isolate, "WASM.instantiateModule()");
-
-  RawBuffer buffer = GetRawBufferArgument(thrower, args);
-  if (buffer.start == nullptr) return;
 
   i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
   if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
@@ -252,10 +155,12 @@
   // Verification will happen during compilation.
   i::Zone zone;
   internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
-      isolate, &zone, buffer.start, buffer.end, false, false);
+      isolate, &zone, start, end, false, false);
 
-  if (result.failed()) {
-    thrower.Failed("", result);
+  if (result.failed() && must_decode) {
+    thrower->Error("Asm.js converted module failed to decode");
+  } else if (result.failed()) {
+    thrower->Failed("", result);
   } else {
     // Success. Instantiate the module and return the object.
     i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
@@ -274,6 +179,49 @@
 
   if (result.val) delete result.val;
 }
+
+
+void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+  ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+
+  if (!args[0]->IsString()) {
+    thrower.Error("Asm module text should be a string");
+    return;
+  }
+
+  i::Factory* factory = isolate->factory();
+  i::Zone zone;
+  Local<String> source = Local<String>::Cast(args[0]);
+  i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+  i::ParseInfo info(&zone, script);
+
+  i::Handle<i::Object> foreign;
+  if (args.Length() > 1 && args[1]->IsObject()) {
+    Local<Object> local_foreign = Local<Object>::Cast(args[1]);
+    foreign = v8::Utils::OpenHandle(*local_foreign);
+  }
+
+  auto module = TranslateAsmModule(&info, foreign, &thrower);
+  if (module == nullptr) {
+    return;
+  }
+
+  InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower, true);
+}
+
+
+void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+  ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+
+  RawBuffer buffer = GetRawBufferArgument(thrower, args);
+  if (buffer.start == nullptr) return;
+
+  InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower, false);
+}
 }  // namespace
 
 
@@ -322,11 +270,9 @@
   JSObject::AddProperty(global, name, wasm_object, attributes);
 
   // Install functions on the WASM object.
-  InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
   InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
   InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
-  InstallFunc(isolate, wasm_object, "compileRun", CompileRun);
-  InstallFunc(isolate, wasm_object, "asmCompileRun", AsmCompileRun);
+  InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
   InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
               InstantiateModuleFromAsm);
 }
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index 470804a..dd653c1 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -22,10 +22,10 @@
 #define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
 #define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
 #define WASM_BR_IF(depth, cond) \
-  kExprBrIf, static_cast<byte>(depth), cond, kExprNop
+  kExprBrIf, static_cast<byte>(depth), kExprNop, cond
 #define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
-#define WASM_BRV_IF(depth, cond, val) \
-  kExprBrIf, static_cast<byte>(depth), cond, val
+#define WASM_BRV_IF(depth, val, cond) \
+  kExprBrIf, static_cast<byte>(depth), val, cond
 #define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
 #define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
 #define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
@@ -104,9 +104,12 @@
       static_cast<byte>(offset), index, val
 #define WASM_CALL_FUNCTION(index, ...) \
   kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
+#define WASM_CALL_IMPORT(index, ...) \
+  kExprCallImport, static_cast<byte>(index), __VA_ARGS__
 #define WASM_CALL_INDIRECT(index, func, ...) \
   kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
 #define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
 #define WASM_CALL_INDIRECT0(index, func) \
   kExprCallIndirect, static_cast<byte>(index), func
 #define WASM_NOT(x) kExprBoolNot, x
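
These macros build byte sequences by comma-splicing, so a call expression nests its argument bytes inline. A test-style sketch of the new import-call macros, assuming the v8-internal byte typedef and the WASM_I8 macro from this same header:

    #include "src/wasm/wasm-macro-gen.h"

    // Two-byte call of import #0 with no arguments:
    const byte kCallImport0[] = {WASM_CALL_IMPORT0(0)};
    // expands to {kExprCallImport, 0}

    // Call of import #1 with one i8 constant argument; the nested
    // expression splices its own bytes in place:
    const byte kCallImport1[] = {WASM_CALL_IMPORT(1, WASM_I8(7))};
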
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index fd24280..02d197c 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -31,33 +31,32 @@
 
 
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
-  os << "WASM function with signature ";
+  os << "WASM function with signature " << *function.sig;
 
-  // TODO(titzer): factor out rendering of signatures.
-  if (function.sig->return_count() == 0) os << "v";
-  for (size_t i = 0; i < function.sig->return_count(); i++) {
-    os << WasmOpcodes::ShortNameOf(function.sig->GetReturn(i));
-  }
-  os << "_";
-  if (function.sig->parameter_count() == 0) os << "v";
-  for (size_t i = 0; i < function.sig->parameter_count(); i++) {
-    os << WasmOpcodes::ShortNameOf(function.sig->GetParam(i));
-  }
   os << " locals: ";
-  if (function.local_int32_count)
-    os << function.local_int32_count << " int32s ";
-  if (function.local_int64_count)
-    os << function.local_int64_count << " int64s ";
-  if (function.local_float32_count)
-    os << function.local_float32_count << " float32s ";
-  if (function.local_float64_count)
-    os << function.local_float64_count << " float64s ";
+  if (function.local_i32_count) os << function.local_i32_count << " i32s ";
+  if (function.local_i64_count) os << function.local_i64_count << " i64s ";
+  if (function.local_f32_count) os << function.local_f32_count << " f32s ";
+  if (function.local_f64_count) os << function.local_f64_count << " f64s ";
 
   os << " code bytes: "
      << (function.code_end_offset - function.code_start_offset);
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+  os << "#" << pair.function_->func_index << ":";
+  if (pair.function_->name_offset > 0) {
+    if (pair.module_) {
+      os << pair.module_->GetName(pair.function_->name_offset);
+    } else {
+      os << "+" << pair.function_->func_index;
+    }
+  } else {
+    os << "?";
+  }
+  return os;
+}
 
 // A helper class for compiling multiple wasm functions that offers
 // placeholder code objects for calling functions that are not yet compiled.
@@ -193,35 +192,98 @@
   return fixed;
 }
 
-
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, int size,
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
                                      byte** backing_store) {
-  void* memory = isolate->array_buffer_allocator()->Allocate(size);
-  if (!memory) return Handle<JSArrayBuffer>::null();
+  if (size > (1 << WasmModule::kMaxMemSize)) {
+    // TODO(titzer): lift restriction on maximum memory allocated here.
+    *backing_store = nullptr;
+    return Handle<JSArrayBuffer>::null();
+  }
+  void* memory =
+      isolate->array_buffer_allocator()->Allocate(static_cast<int>(size));
+  if (!memory) {
+    *backing_store = nullptr;
+    return Handle<JSArrayBuffer>::null();
+  }
+
   *backing_store = reinterpret_cast<byte*>(memory);
 
 #if DEBUG
   // Double check the API allocator actually zero-initialized the memory.
-  for (int i = 0; i < size; i++) {
-    DCHECK_EQ(0, (*backing_store)[i]);
+  byte* bytes = reinterpret_cast<byte*>(*backing_store);
+  for (size_t i = 0; i < size; i++) {
+    DCHECK_EQ(0, bytes[i]);
   }
 #endif
 
   Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
-  JSArrayBuffer::Setup(buffer, isolate, false, memory, size);
+  JSArrayBuffer::Setup(buffer, isolate, false, memory, static_cast<int>(size));
   buffer->set_is_neuterable(false);
   return buffer;
 }
+
+// Set the memory for a module instance to be the {memory} array buffer.
+void SetMemory(WasmModuleInstance* instance, Handle<JSArrayBuffer> memory) {
+  memory->set_is_neuterable(false);
+  instance->mem_start = reinterpret_cast<byte*>(memory->backing_store());
+  instance->mem_size = memory->byte_length()->Number();
+  instance->mem_buffer = memory;
+}
+
+// Allocate memory for a module instance as a new JSArrayBuffer.
+bool AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
+                    WasmModuleInstance* instance) {
+  DCHECK(instance->module);
+  DCHECK(instance->mem_buffer.is_null());
+
+  if (instance->module->min_mem_size_log2 > WasmModule::kMaxMemSize) {
+    thrower->Error("Out of memory: wasm memory too large");
+    return false;
+  }
+  instance->mem_size = static_cast<size_t>(1)
+                       << instance->module->min_mem_size_log2;
+  instance->mem_buffer =
+      NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
+  if (!instance->mem_start) {
+    thrower->Error("Out of memory: wasm memory");
+    instance->mem_size = 0;
+    return false;
+  }
+  return true;
+}
+
+bool AllocateGlobals(ErrorThrower* thrower, Isolate* isolate,
+                     WasmModuleInstance* instance) {
+  instance->globals_size = AllocateGlobalsOffsets(instance->module->globals);
+
+  if (instance->globals_size > 0) {
+    instance->globals_buffer = NewArrayBuffer(isolate, instance->globals_size,
+                                              &instance->globals_start);
+    if (!instance->globals_start) {
+      // Not enough space for backing store of globals.
+      thrower->Error("Out of memory: wasm globals");
+      return false;
+    }
+  }
+  return true;
+}
 }  // namespace
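
Note the pattern static_cast<size_t>(1) << min_mem_size_log2 in AllocateMemory: shifting the int literal 1 by 31 or more is undefined behavior, so the operand is widened first. A two-line illustration (sketch):

    #include <cstddef>

    size_t MemBytes(unsigned log2) {
      // "1 << log2" is UB for log2 >= 31 (int operand); widening first
      // keeps the shift well-defined up to the width of size_t.
      return static_cast<size_t>(1) << log2;
    }
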
 
-
 WasmModule::WasmModule()
-    : globals(nullptr),
+    : shared_isolate(nullptr),
+      module_start(nullptr),
+      module_end(nullptr),
+      min_mem_size_log2(0),
+      max_mem_size_log2(0),
+      mem_export(false),
+      mem_external(false),
+      start_function_index(-1),
+      globals(nullptr),
       signatures(nullptr),
       functions(nullptr),
       data_segments(nullptr),
-      function_table(nullptr) {}
-
+      function_table(nullptr),
+      import_table(nullptr) {}
 
 WasmModule::~WasmModule() {
   if (globals) delete globals;
@@ -229,8 +291,33 @@
   if (functions) delete functions;
   if (data_segments) delete data_segments;
   if (function_table) delete function_table;
+  if (import_table) delete import_table;
 }
 
+static MaybeHandle<JSFunction> LookupFunction(ErrorThrower& thrower,
+                                              Handle<JSObject> ffi,
+                                              uint32_t index,
+                                              Handle<String> name,
+                                              const char* cstr) {
+  if (!ffi.is_null()) {
+    MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+    if (!result.is_null()) {
+      Handle<Object> obj = result.ToHandleChecked();
+      if (obj->IsJSFunction()) {
+        return Handle<JSFunction>::cast(obj);
+      } else {
+        thrower.Error("FFI function #%d:%s is not a JSFunction.", index, cstr);
+        return MaybeHandle<JSFunction>();
+      }
+    } else {
+      thrower.Error("FFI function #%d:%s not found.", index, cstr);
+      return MaybeHandle<JSFunction>();
+    }
+  } else {
+    thrower.Error("FFI table is not an object.");
+    return MaybeHandle<JSFunction>();
+  }
+}
 
 // Instantiates a wasm module as a JSObject.
 //  * allocates a backing store of {mem_size} bytes.
@@ -242,95 +329,91 @@
                                               Handle<JSArrayBuffer> memory) {
   this->shared_isolate = isolate;  // TODO(titzer): have a real shared isolate.
   ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
-
   Factory* factory = isolate->factory();
-  // Memory is bigger than maximum supported size.
-  if (memory.is_null() && min_mem_size_log2 > kMaxMemSize) {
-    thrower.Error("Out of memory: wasm memory too large");
-    return MaybeHandle<JSObject>();
-  }
 
+  //-------------------------------------------------------------------------
+  // Allocate the instance and its JS counterpart.
+  //-------------------------------------------------------------------------
   Handle<Map> map = factory->NewMap(
       JS_OBJECT_TYPE,
       JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
-
-  //-------------------------------------------------------------------------
-  // Allocate the module object.
-  //-------------------------------------------------------------------------
-  Handle<JSObject> module = factory->NewJSObjectFromMap(map, TENURED);
+  WasmModuleInstance instance(this);
+  std::vector<Handle<Code>> import_code;
+  instance.context = isolate->native_context();
+  instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
   Handle<FixedArray> code_table =
       factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+  instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
 
   //-------------------------------------------------------------------------
-  // Allocate the linear memory.
+  // Allocate and initialize the linear memory.
   //-------------------------------------------------------------------------
-  uint32_t mem_size = 1 << min_mem_size_log2;
-  byte* mem_addr = nullptr;
-  Handle<JSArrayBuffer> mem_buffer;
-  if (!memory.is_null()) {
-    memory->set_is_neuterable(false);
-    mem_addr = reinterpret_cast<byte*>(memory->backing_store());
-    mem_size = memory->byte_length()->Number();
-    mem_buffer = memory;
-  } else {
-    mem_buffer = NewArrayBuffer(isolate, mem_size, &mem_addr);
-    if (!mem_addr) {
-      // Not enough space for backing store of memory
-      thrower.Error("Out of memory: wasm memory");
+  if (memory.is_null()) {
+    if (!AllocateMemory(&thrower, isolate, &instance)) {
       return MaybeHandle<JSObject>();
     }
+  } else {
+    SetMemory(&instance, memory);
   }
-
-  // Load initialized data segments.
-  LoadDataSegments(this, mem_addr, mem_size);
-
-  module->SetInternalField(kWasmMemArrayBuffer, *mem_buffer);
+  instance.js_object->SetInternalField(kWasmMemArrayBuffer,
+                                       *instance.mem_buffer);
+  LoadDataSegments(this, instance.mem_start, instance.mem_size);
 
   if (mem_export) {
     // Export the memory as a named property.
     Handle<String> name = factory->InternalizeUtf8String("memory");
-    JSObject::AddProperty(module, name, mem_buffer, READ_ONLY);
+    JSObject::AddProperty(instance.js_object, name, instance.mem_buffer,
+                          READ_ONLY);
   }
 
   //-------------------------------------------------------------------------
   // Allocate the globals area if necessary.
   //-------------------------------------------------------------------------
-  size_t globals_size = AllocateGlobalsOffsets(globals);
-  byte* globals_addr = nullptr;
-  if (globals_size > 0) {
-    Handle<JSArrayBuffer> globals_buffer =
-        NewArrayBuffer(isolate, mem_size, &globals_addr);
-    if (!globals_addr) {
-      // Not enough space for backing store of globals.
-      thrower.Error("Out of memory: wasm globals");
-      return MaybeHandle<JSObject>();
-    }
+  if (!AllocateGlobals(&thrower, isolate, &instance)) {
+    return MaybeHandle<JSObject>();
+  }
+  if (!instance.globals_buffer.is_null()) {
+    instance.js_object->SetInternalField(kWasmGlobalsArrayBuffer,
+                                         *instance.globals_buffer);
+  }
 
-    module->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
-  } else {
-    module->SetInternalField(kWasmGlobalsArrayBuffer, Smi::FromInt(0));
+  //-------------------------------------------------------------------------
+  // Compile wrappers to imported functions.
+  //-------------------------------------------------------------------------
+  uint32_t index = 0;
+  instance.function_table = BuildFunctionTable(isolate, this);
+  WasmLinker linker(isolate, functions->size());
+  ModuleEnv module_env;
+  module_env.module = this;
+  module_env.instance = &instance;
+  module_env.linker = &linker;
+  module_env.asm_js = false;
+
+  if (import_table->size() > 0) {
+    instance.import_code = &import_code;
+    instance.import_code->reserve(import_table->size());
+    for (const WasmImport& import : *import_table) {
+      const char* cstr = GetName(import.function_name_offset);
+      Handle<String> name = factory->InternalizeUtf8String(cstr);
+      MaybeHandle<JSFunction> function =
+          LookupFunction(thrower, ffi, index, name, cstr);
+      if (function.is_null()) return MaybeHandle<JSObject>();
+      Handle<Code> code = compiler::CompileWasmToJSWrapper(
+          isolate, &module_env, function.ToHandleChecked(), import.sig, cstr);
+      instance.import_code->push_back(code);
+      index++;
+    }
   }
 
   //-------------------------------------------------------------------------
   // Compile all functions in the module.
   //-------------------------------------------------------------------------
-  int index = 0;
-  WasmLinker linker(isolate, functions->size());
-  ModuleEnv module_env;
-  module_env.module = this;
-  module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr);
-  module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr) + mem_size;
-  module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr);
-  module_env.linker = &linker;
-  module_env.function_code = nullptr;
-  module_env.function_table = BuildFunctionTable(isolate, this);
-  module_env.memory = memory;
-  module_env.context = isolate->native_context();
-  module_env.asm_js = false;
 
   // First pass: compile each function and initialize the code table.
+  index = 0;
   for (const WasmFunction& func : *functions) {
     if (thrower.error()) break;
+    DCHECK_EQ(index, func.func_index);
 
     const char* cstr = GetName(func.name_offset);
     Handle<String> name = factory->InternalizeUtf8String(cstr);
@@ -338,38 +421,21 @@
     Handle<JSFunction> function = Handle<JSFunction>::null();
     if (func.external) {
       // Lookup external function in FFI object.
-      if (!ffi.is_null()) {
-        MaybeHandle<Object> result = Object::GetProperty(ffi, name);
-        if (!result.is_null()) {
-          Handle<Object> obj = result.ToHandleChecked();
-          if (obj->IsJSFunction()) {
-            function = Handle<JSFunction>::cast(obj);
-            code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
-                                                    function, index);
-          } else {
-            thrower.Error("FFI function #%d:%s is not a JSFunction.", index,
-                          cstr);
-            return MaybeHandle<JSObject>();
-          }
-        } else {
-          thrower.Error("FFI function #%d:%s not found.", index, cstr);
-          return MaybeHandle<JSObject>();
-        }
-      } else {
-        thrower.Error("FFI table is not an object.");
-        return MaybeHandle<JSObject>();
-      }
+      MaybeHandle<JSFunction> function =
+          LookupFunction(thrower, ffi, index, name, cstr);
+      if (function.is_null()) return MaybeHandle<JSObject>();
+      code = compiler::CompileWasmToJSWrapper(
+          isolate, &module_env, function.ToHandleChecked(), func.sig, cstr);
     } else {
       // Compile the function.
-      code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func,
-                                           index);
+      code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
       if (code.is_null()) {
         thrower.Error("Compilation of #%d:%s failed.", index, cstr);
         return MaybeHandle<JSObject>();
       }
       if (func.exported) {
-        function = compiler::CompileJSToWasmWrapper(isolate, &module_env, name,
-                                                    code, module, index);
+        function = compiler::CompileJSToWasmWrapper(
+            isolate, &module_env, name, code, instance.js_object, index);
       }
     }
     if (!code.is_null()) {
@@ -379,27 +445,54 @@
     }
     if (func.exported) {
       // Exported functions are installed as read-only properties on the module.
-      JSObject::AddProperty(module, name, function, READ_ONLY);
+      JSObject::AddProperty(instance.js_object, name, function, READ_ONLY);
     }
     index++;
   }
 
   // Second pass: patch all direct call sites.
-  linker.Link(module_env.function_table, this->function_table);
+  linker.Link(instance.function_table, this->function_table);
+  instance.js_object->SetInternalField(kWasmModuleFunctionTable,
+                                       Smi::FromInt(0));
 
-  module->SetInternalField(kWasmModuleFunctionTable, Smi::FromInt(0));
-  module->SetInternalField(kWasmModuleCodeTable, *code_table);
-  return module;
+  // Run the start function if one was specified.
+  if (this->start_function_index >= 0) {
+    HandleScope scope(isolate);
+    uint32_t index = static_cast<uint32_t>(this->start_function_index);
+    Handle<String> name = isolate->factory()->NewStringFromStaticChars("start");
+    Handle<Code> code = linker.GetFunctionCode(index);
+    Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
+        isolate, &module_env, name, code, instance.js_object, index);
+
+    // Call the JS function.
+    Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+    MaybeHandle<Object> retval =
+        Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+
+    if (retval.is_null()) {
+      thrower.Error("WASM.instantiateModule(): start function failed");
+    }
+  }
+  return instance.js_object;
 }
 
 
 Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
   DCHECK(IsValidFunction(index));
   if (linker) return linker->GetFunctionCode(index);
-  if (function_code) return function_code->at(index);
+  if (instance && instance->function_code) {
+    return instance->function_code->at(index);
+  }
   return Handle<Code>::null();
 }
 
+Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
+  DCHECK(IsValidImport(index));
+  if (instance && instance->import_code) {
+    return instance->import_code->at(index);
+  }
+  return Handle<Code>::null();
+}
 
 compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
                                                        uint32_t index) {
@@ -436,43 +529,45 @@
 
 int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
   ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+  WasmModuleInstance instance(module);
 
-  // Allocate temporary linear memory and globals.
-  size_t mem_size = 1 << module->min_mem_size_log2;
-  size_t globals_size = AllocateGlobalsOffsets(module->globals);
+  // Allocate and initialize the linear memory.
+  if (!AllocateMemory(&thrower, isolate, &instance)) {
+    return -1;
+  }
+  LoadDataSegments(module, instance.mem_start, instance.mem_size);
 
-  base::SmartArrayPointer<byte> mem_addr(new byte[mem_size]);
-  base::SmartArrayPointer<byte> globals_addr(new byte[globals_size]);
+  // Allocate the globals area if necessary.
+  if (!AllocateGlobals(&thrower, isolate, &instance)) {
+    return -1;
+  }
 
-  memset(mem_addr.get(), 0, mem_size);
-  memset(globals_addr.get(), 0, globals_size);
+  // Build the function table.
+  instance.function_table = BuildFunctionTable(isolate, module);
 
   // Create module environment.
   WasmLinker linker(isolate, module->functions->size());
   ModuleEnv module_env;
   module_env.module = module;
-  module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr.get());
-  module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr.get()) + mem_size;
-  module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr.get());
+  module_env.instance = &instance;
   module_env.linker = &linker;
-  module_env.function_code = nullptr;
-  module_env.function_table = BuildFunctionTable(isolate, module);
   module_env.asm_js = false;
 
-  // Load data segments.
-  // TODO(titzer): throw instead of crashing if segments don't fit in memory?
-  LoadDataSegments(module, mem_addr.get(), mem_size);
-
   // Compile all functions.
   Handle<Code> main_code = Handle<Code>::null();  // record last code.
-  int index = 0;
+  uint32_t index = 0;
+  int main_index = 0;
   for (const WasmFunction& func : *module->functions) {
+    DCHECK_EQ(index, func.func_index);
     if (!func.external) {
       // Compile the function and install it in the code table.
-      Handle<Code> code = compiler::CompileWasmFunction(
-          thrower, isolate, &module_env, func, index);
+      Handle<Code> code =
+          compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
       if (!code.is_null()) {
-        if (func.exported) main_code = code;
+        if (func.exported) {
+          main_code = code;
+          main_index = index;
+        }
         linker.Finish(index, code);
       }
       if (thrower.error()) return -1;
@@ -480,30 +575,37 @@
     index++;
   }
 
-  if (!main_code.is_null()) {
-    linker.Link(module_env.function_table, module->function_table);
-#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
-    // Run the main code on arm64 simulator.
-    Simulator* simulator = Simulator::current(isolate);
-    Simulator::CallArgument args[] = {Simulator::CallArgument(0),
-                                      Simulator::CallArgument::End()};
-    return static_cast<int32_t>(simulator->CallInt64(main_code->entry(), args));
-#elif USE_SIMULATOR
-    // Run the main code on simulator.
-    Simulator* simulator = Simulator::current(isolate);
-    return static_cast<int32_t>(
-        simulator->Call(main_code->entry(), 4, 0, 0, 0, 0));
-#else
-    // Run the main code as raw machine code.
-    int32_t (*raw_func)() = reinterpret_cast<int32_t (*)()>(
-        reinterpret_cast<uintptr_t>(main_code->entry()));
-    return raw_func();
-#endif
-  } else {
-    // No main code was found.
-    isolate->Throw(*isolate->factory()->NewStringFromStaticChars(
-        "WASM.compileRun() failed: no valid main code produced."));
+  if (main_code.is_null()) {
+    thrower.Error("WASM.compileRun() failed: no main code found");
+    return -1;
   }
+
+  linker.Link(instance.function_table, instance.module->function_table);
+
+  // Wrap the main code so it can be called as a JS function.
+  Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
+  Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
+  Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
+      isolate, &module_env, name, main_code, module_object, main_index);
+
+  // Call the JS function.
+  Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+  MaybeHandle<Object> retval =
+      Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+
+  // The result should be a number.
+  if (retval.is_null()) {
+    thrower.Error("WASM.compileRun() failed: Invocation was null");
+    return -1;
+  }
+  Handle<Object> result = retval.ToHandleChecked();
+  if (result->IsSmi()) {
+    return Smi::cast(*result)->value();
+  }
+  if (result->IsHeapNumber()) {
+    return static_cast<int32_t>(HeapNumber::cast(*result)->value());
+  }
+  thrower.Error("WASM.compileRun() failed: Return value should be number");
   return -1;
 }
 }  // namespace wasm
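
The tail of CompileAndRunWasmModule coerces the wrapper's JS return value to
an int32: a Smi is unwrapped directly, a HeapNumber is truncated. In isolation
the pattern looks like this (a sketch; ToInt32Result is a hypothetical helper,
not part of the patch):

    int32_t ToInt32Result(Handle<Object> result) {
      if (result->IsSmi()) return Smi::cast(*result)->value();
      if (result->IsHeapNumber()) {
        return static_cast<int32_t>(HeapNumber::cast(*result)->value());
      }
      return -1;  // anything else is reported as an error by the caller.
    }
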
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 5e2ba58..5f5777c 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -30,11 +30,13 @@
   kDeclGlobals = 0x03,
   kDeclDataSegments = 0x04,
   kDeclFunctionTable = 0x05,
-  kDeclWLL = 0x11,
   kDeclEnd = 0x06,
+  kDeclStartFunction = 0x07,
+  kDeclImportTable = 0x08,
+  kDeclWLL = 0x11,
 };
 
-static const int kMaxModuleSectionCode = 6;
+static const int kMaxModuleSectionCode = 0x11;
 
 enum WasmFunctionDeclBit {
   kDeclFunctionName = 0x01,
@@ -48,22 +50,29 @@
 static const size_t kDeclGlobalSize = 6;
 static const size_t kDeclDataSegmentSize = 13;
 
-// Static representation of a wasm function.
+// Static representation of a WASM function.
 struct WasmFunction {
   FunctionSig* sig;      // signature of the function.
+  uint32_t func_index;   // index into the function table.
   uint16_t sig_index;    // index into the signature table.
   uint32_t name_offset;  // offset in the module bytes of the name, if any.
   uint32_t code_start_offset;    // offset in the module bytes of code start.
   uint32_t code_end_offset;      // offset in the module bytes of code end.
-  uint16_t local_int32_count;    // number of int32 local variables.
-  uint16_t local_int64_count;    // number of int64 local variables.
-  uint16_t local_float32_count;  // number of float32 local variables.
-  uint16_t local_float64_count;  // number of float64 local variables.
+  uint16_t local_i32_count;      // number of i32 local variables.
+  uint16_t local_i64_count;      // number of i64 local variables.
+  uint16_t local_f32_count;      // number of f32 local variables.
+  uint16_t local_f64_count;      // number of f64 local variables.
   bool exported;                 // true if this function is exported.
   bool external;  // true if this function is externally supplied.
 };
 
-struct ModuleEnv;  // forward declaration of decoder interface.
+// Static representation of an imported WASM function.
+struct WasmImport {
+  FunctionSig* sig;               // signature of the function.
+  uint16_t sig_index;             // index into the signature table.
+  uint32_t module_name_offset;    // offset in module bytes of the module name.
+  uint32_t function_name_offset;  // offset in module bytes of the import name.
+};
 
 // Static representation of a wasm global variable.
 struct WasmGlobal {
@@ -93,25 +102,27 @@
   uint8_t max_mem_size_log2;  // maximum size of the memory (log base 2).
   bool mem_export;            // true if the memory is exported.
   bool mem_external;          // true if the memory is external.
+  int start_function_index;   // start function, if any.
 
   std::vector<WasmGlobal>* globals;             // globals in this module.
   std::vector<FunctionSig*>* signatures;        // signatures in this module.
   std::vector<WasmFunction>* functions;         // functions in this module.
   std::vector<WasmDataSegment>* data_segments;  // data segments in this module.
   std::vector<uint16_t>* function_table;        // function table.
+  std::vector<WasmImport>* import_table;        // import table.
 
   WasmModule();
   ~WasmModule();
 
   // Get a pointer to a string stored in the module bytes representing a name.
-  const char* GetName(uint32_t offset) {
-    CHECK(BoundsCheck(offset, offset + 1));
+  const char* GetName(uint32_t offset) const {
     if (offset == 0) return "<?>";  // no name.
+    CHECK(BoundsCheck(offset, offset + 1));
     return reinterpret_cast<const char*>(module_start + offset);
   }
 
   // Checks the given offset range is contained within the module bytes.
-  bool BoundsCheck(uint32_t start, uint32_t end) {
+  bool BoundsCheck(uint32_t start, uint32_t end) const {
     size_t size = module_end - module_start;
     return start < size && end < size;
   }
@@ -121,22 +132,42 @@
                                     Handle<JSArrayBuffer> memory);
 };
 
+// An instantiated WASM module, including memory, function table, etc.
+struct WasmModuleInstance {
+  WasmModule* module;  // static representation of the module.
+  // -- Heap allocated --------------------------------------------------------
+  Handle<JSObject> js_object;            // JavaScript module object.
+  Handle<Context> context;               // JavaScript native context.
+  Handle<JSArrayBuffer> mem_buffer;      // Handle to array buffer of memory.
+  Handle<JSArrayBuffer> globals_buffer;  // Handle to array buffer of globals.
+  Handle<FixedArray> function_table;     // indirect function table.
+  std::vector<Handle<Code>>* function_code;  // code objects for each function.
+  std::vector<Handle<Code>>* import_code;    // code objects for each import.
+  // -- raw memory ------------------------------------------------------------
+  byte* mem_start;  // start of linear memory.
+  size_t mem_size;  // size of the linear memory.
+  // -- raw globals -----------------------------------------------------------
+  byte* globals_start;  // start of the globals area.
+  size_t globals_size;  // size of the globals area.
+
+  explicit WasmModuleInstance(WasmModule* m)
+      : module(m),
+        function_code(nullptr),
+        mem_start(nullptr),
+        mem_size(0),
+        globals_start(nullptr),
+        globals_size(0) {}
+};
+
 // forward declaration.
 class WasmLinker;
 
 // Interface provided to the decoder/graph builder which contains only
 // minimal information about the globals, functions, and function tables.
 struct ModuleEnv {
-  uintptr_t globals_area;  // address of the globals area.
-  uintptr_t mem_start;     // address of the start of linear memory.
-  uintptr_t mem_end;       // address of the end of linear memory.
-
   WasmModule* module;
+  WasmModuleInstance* instance;
   WasmLinker* linker;
-  std::vector<Handle<Code>>* function_code;
-  Handle<FixedArray> function_table;
-  Handle<JSArrayBuffer> memory;
-  Handle<Context> context;
   bool asm_js;  // true if the module originated from asm.js.
 
   bool IsValidGlobal(uint32_t index) {
@@ -148,6 +179,9 @@
   bool IsValidSignature(uint32_t index) {
     return module && index < module->signatures->size();
   }
+  bool IsValidImport(uint32_t index) {
+    return module && index < module->import_table->size();
+  }
   MachineType GetGlobalType(uint32_t index) {
     DCHECK(IsValidGlobal(index));
     return module->globals->at(index).type;
@@ -156,23 +190,41 @@
     DCHECK(IsValidFunction(index));
     return module->functions->at(index).sig;
   }
+  FunctionSig* GetImportSignature(uint32_t index) {
+    DCHECK(IsValidImport(index));
+    return module->import_table->at(index).sig;
+  }
   FunctionSig* GetSignature(uint32_t index) {
     DCHECK(IsValidSignature(index));
     return module->signatures->at(index);
   }
   size_t FunctionTableSize() {
-    return module ? module->function_table->size() : 0;
+    return module && module->function_table ? module->function_table->size()
+                                            : 0;
   }
 
   Handle<Code> GetFunctionCode(uint32_t index);
+  Handle<Code> GetImportCode(uint32_t index);
   Handle<FixedArray> GetFunctionTable();
 
-  compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone, FunctionSig* sig);
+  static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
+                                                         FunctionSig* sig);
+  static compiler::CallDescriptor* GetI32WasmCallDescriptor(
+      Zone* zone, compiler::CallDescriptor* descriptor);
   compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
 };
 
+// A helper for printing out the names of functions.
+struct WasmFunctionName {
+  const WasmFunction* function_;
+  const WasmModule* module_;
+  WasmFunctionName(const WasmFunction* function, const ModuleEnv* menv)
+      : function_(function), module_(menv ? menv->module : nullptr) {}
+};
+
 std::ostream& operator<<(std::ostream& os, const WasmModule& module);
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
 
 typedef Result<WasmModule*> ModuleResult;
 typedef Result<WasmFunction*> FunctionResult;
@@ -185,6 +237,7 @@
 // For testing. Decode, verify, and run the last exported function in the
 // given decoded module.
 int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
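
WasmFunctionName is a small stream adapter: it pairs a function with the
module (taken from the ModuleEnv, which may be null) so that operator<< can
resolve the function's name from the module bytes. A minimal usage sketch,
assuming func and menv are in scope:

    // Streams a human-readable name for the function; the exact format is
    // defined by the operator<< implementation in wasm-module.cc.
    os << WasmFunctionName(&func, menv);
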
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index 25eef03..a609e03 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -25,6 +25,20 @@
 }
 
 
+std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
+  if (sig.return_count() == 0) os << "v";
+  for (size_t i = 0; i < sig.return_count(); i++) {
+    os << WasmOpcodes::ShortNameOf(sig.GetReturn(i));
+  }
+  os << "_";
+  if (sig.parameter_count() == 0) os << "v";
+  for (size_t i = 0; i < sig.parameter_count(); i++) {
+    os << WasmOpcodes::ShortNameOf(sig.GetParam(i));
+  }
+  return os;
+}
+
+
 #define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
 
 
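
The new FunctionSig printer emits the return types, an underscore, then the
parameter types, one short character per type, with "v" standing in for an
empty list. Assuming ShortNameOf maps i32/i64/f32/f64 to 'i'/'l'/'f'/'d', the
output looks like:

    // i32 f(i32, i32)  ->  "i_ii"
    // void g()         ->  "v_v"
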
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index ae2843a..7cb9c00 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -66,6 +66,9 @@
 };
 
 typedef Signature<LocalType> FunctionSig;
+std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
+
+// TODO(titzer): Renumber all the opcodes to fill in holes.
 
 // Control expressions and blocks.
 #define FOREACH_CONTROL_OPCODE(V) \
@@ -80,7 +83,6 @@
   V(TableSwitch, 0x08, _)         \
   V(Return, 0x14, _)              \
   V(Unreachable, 0x15, _)
-// TODO(titzer): numbering
 
 // Constants, locals, globals, and calls.
 #define FOREACH_MISC_OPCODE(V) \
@@ -94,7 +96,8 @@
   V(LoadGlobal, 0x10, _)       \
   V(StoreGlobal, 0x11, _)      \
   V(CallFunction, 0x12, _)     \
-  V(CallIndirect, 0x13, _)
+  V(CallIndirect, 0x13, _)     \
+  V(CallImport, 0x1F, _)
 
 // Load memory expressions.
 #define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -398,7 +401,6 @@
     }
   }
 
-  // TODO(titzer): remove this method
   static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
     if (type == MachineType::Int8()) {
       return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
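
LoadStoreOpcodeOf (no longer slated for removal, per the dropped TODO) maps a
MachineType plus a load/store flag to the matching memory-access opcode. A
sketch of a call site, assuming the method lives on WasmOpcodes as ShortNameOf
does, and using only the Int8 case visible in this hunk:

    // An i8 load is sign-extended to i32.
    WasmOpcode op = WasmOpcodes::LoadStoreOpcodeOf(MachineType::Int8(), false);
    // op == kExprI32LoadMem8S
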
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index bfec51c..9a0fc7c 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -456,10 +456,8 @@
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -478,23 +476,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  // The recognized call sequence is:
-  //  movq(kScratchRegister, address); call(kScratchRegister);
-  // It only needs to be distinguished from a return sequence
-  //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
-  // The 11th byte is int3 (0xCC) in the return sequence and
-  // REX.WB (0x48+register bit) for the call sequence.
-  return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
-         0xCC;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  return !Assembler::IsNop(pc());
-}
-
-
 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
   DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   DCHECK(*pc_ == kCallOpcode);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 9626efc..3cf3398 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -3179,6 +3179,17 @@
 }
 
 
+void Assembler::cvtlsi2ss(XMMRegister dst, const Operand& src) {
+  DCHECK(!IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
   emit(0xF3);
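
The new cvtlsi2ss overload emits the same CVTSI2SS encoding (F3 0F 2A) as the
register form, but with a memory source, so an int32 can be converted to
float32 without first loading it into a general-purpose register. A hedged
usage sketch:

    // Convert the int32 at [rsp] directly into xmm0.
    __ cvtlsi2ss(xmm0, Operand(rsp, 0));
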
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 799fa6f..2847ff2 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -248,6 +248,8 @@
 
 typedef DoubleRegister XMMRegister;
 
+typedef DoubleRegister Simd128Register;
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -1025,6 +1027,7 @@
 
   void cvttss2si(Register dst, const Operand& src);
   void cvttss2si(Register dst, XMMRegister src);
+  void cvtlsi2ss(XMMRegister dst, const Operand& src);
   void cvtlsi2ss(XMMRegister dst, Register src);
 
   void andps(XMMRegister dst, XMMRegister src);
@@ -1370,6 +1373,13 @@
   void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
     vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
   }
+  void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
+    XMMRegister isrc2 = {src2.code()};
+    vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
+  }
+  void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x2a, dst, src1, src2, kF3, k0F, kW0);
+  }
   void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
     XMMRegister isrc2 = {src2.code()};
     vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
@@ -1384,6 +1394,14 @@
   void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
     vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
   }
+  void vcvttss2si(Register dst, XMMRegister src) {
+    XMMRegister idst = {dst.code()};
+    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+  }
+  void vcvttss2si(Register dst, const Operand& src) {
+    XMMRegister idst = {dst.code()};
+    vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+  }
   void vcvttsd2si(Register dst, XMMRegister src) {
     XMMRegister idst = {dst.code()};
     vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
@@ -1660,7 +1678,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                           ConstantPoolEntry::Access access,
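
The AVX counterparts route through vsd() with the kF3 prefix; integer
registers are passed as XMMRegister operands carrying the same register code
(the isrc2/idst temporaries), since vsd() encodes its operands positionally.
A sketch, assuming the usual CpuFeatureScope guard and detected AVX support:

    CpuFeatureScope avx_scope(masm, AVX);
    __ vcvtlsi2ss(xmm0, xmm0, rax);  // VEX-encoded int32 -> float32.
    __ vcvttss2si(rax, xmm0);        // VEX-encoded truncating float32 -> int32.
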
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index cb092f2..6c4419e 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -60,27 +60,6 @@
 }
 
 
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
-  // ----------- S t a t e -------------
-  //  -- rdx : new target (preserved for callee)
-  //  -- rdi : target function (preserved for callee)
-  // -----------------------------------
-
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  __ Push(rdi);
-  __ Push(rdx);
-  // Function is also the parameter to the runtime call.
-  __ Push(rdi);
-
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ Pop(rdx);
-  __ Pop(rdi);
-}
-
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
   __ movp(kScratchRegister,
           FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -90,10 +69,35 @@
   __ jmp(kScratchRegister);
 }
 
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
+  // ----------- S t a t e -------------
+  //  -- rax : argument count (preserved for callee)
+  //  -- rdx : new target (preserved for callee)
+  //  -- rdi : target function (preserved for callee)
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ Integer32ToSmi(rax, rax);
+    __ Push(rax);
+    // Push a copy of the target function and the new target.
+    __ Push(rdi);
+    __ Push(rdx);
+    // Function is also the parameter to the runtime call.
+    __ Push(rdi);
 
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rax);
+    __ CallRuntime(function_id, 1);
+    __ movp(rbx, rax);
+
+    // Restore target function and new target.
+    __ Pop(rdx);
+    __ Pop(rdi);
+    __ Pop(rax);
+    __ SmiToInteger32(rax, rax);
+  }
+  __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ jmp(rbx);
 }
 
 
@@ -107,8 +111,7 @@
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   __ j(above_equal, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
@@ -117,7 +120,8 @@
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
   //  -- rdi: constructor function
@@ -136,152 +140,23 @@
     __ Push(rcx);
 
     if (create_implicit_receiver) {
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
-        __ j(not_equal, &rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        // rdx: new target
-        __ movp(rax,
-                FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
-        // Will both indicate a NULL and a Smi
-        DCHECK(kSmiTag == 0);
-        __ JumpIfSmi(rax, &rt_call);
-        // rdi: constructor
-        // rax: initial map (if proven valid below)
-        __ CmpObjectType(rax, MAP_TYPE, rbx);
-        __ j(not_equal, &rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
-        __ j(not_equal, &rt_call);
-
-        // Now allocate the JSObject on the heap.
-        __ movzxbp(r9, FieldOperand(rax, Map::kInstanceSizeOffset));
-        __ shlp(r9, Immediate(kPointerSizeLog2));
-        // r9: size of new object
-        __ Allocate(r9, rbx, r9, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-        // Allocated the JSObject, now initialize the fields.
-        // rdi: constructor
-        // rdx: new target
-        // rax: initial map
-        // rbx: JSObject (not HeapObject tagged - the actual address).
-        // r9: start of next object
-        __ movp(Operand(rbx, JSObject::kMapOffset), rax);
-        __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-        __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
-        __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
-        __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ orp(rbx, Immediate(kHeapObjectTag));
-
-        // Fill all the in-object properties with the appropriate filler.
-        // rbx: JSObject (tagged)
-        // rcx: First in-object property of JSObject (not tagged)
-        __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          // The code below relies on these assumptions.
-          STATIC_ASSERT(Map::kNoSlackTracking == 0);
-          STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-          // Check if slack tracking is enabled.
-          __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
-          __ shrl(rsi, Immediate(Map::ConstructionCounter::kShift));
-          __ j(zero, &no_inobject_slack_tracking);  // Map::kNoSlackTracking
-          __ Push(rsi);  // Save allocation count value.
-          // Decrease generous allocation count.
-          __ subl(FieldOperand(rax, Map::kBitField3Offset),
-                  Immediate(1 << Map::ConstructionCounter::kShift));
-
-          // Allocate object with a slack.
-          __ movzxbp(rsi, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
-          __ negp(rsi);
-          __ leap(rsi, Operand(r9, rsi, times_pointer_size, 0));
-          // rsi: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ cmpp(rcx, rsi);
-            __ Assert(less_equal,
-                      kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-          __ InitializeFieldsWithFiller(rcx, rsi, r11);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
-          __ InitializeFieldsWithFiller(rcx, r9, r11);
-
-          __ Pop(rsi);  // Restore allocation count value before decreasing.
-          __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
-          __ j(not_equal, &allocated);
-
-          // Push the constructor, new_target and the object to the stack,
-          // and then the initial map as an argument to the runtime call.
-          __ Push(rdi);
-          __ Push(rdx);
-          __ Push(rbx);
-
-          __ Push(rax);  // initial map
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-
-          __ Pop(rbx);
-          __ Pop(rdx);
-          __ Pop(rdi);
-
-          // Continue with JSObject being successfully allocated.
-          // rdi: constructor
-          // rdx: new target
-          // rbx: JSObject (tagged)
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(rcx, r9, r11);
-
-        // Continue with JSObject being successfully allocated
-        // rdi: constructor
-        // rdx: new target
-        // rbx: JSObject (tagged)
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // rdi: constructor
-      // rdx: new target
-      __ bind(&rt_call);
-
-      // Must restore rsi (context) before calling runtime.
-      __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
-      // Push the constructor and new_target twice, second pair as arguments
-      // to the runtime call.
+      // Allocate the new receiver object.
       __ Push(rdi);
       __ Push(rdx);
-      __ Push(rdi);  // constructor function
-      __ Push(rdx);  // new target
-      __ CallRuntime(Runtime::kNewObject);
-      __ movp(rbx, rax);  // store result in rbx
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ movp(rbx, rax);
       __ Pop(rdx);
       __ Pop(rdi);
 
-      // Receiver for constructor call allocated.
-      // rdi: constructor
-      // rdx: new target
-      // rbx: newly allocated object
-      __ bind(&allocated);
+      // ----------- S t a t e -------------
+      //  -- rdi: constructor function
+      //  -- rbx: newly allocated object
+      //  -- rdx: new target
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
-      __ movp(rax, Operand(rsp, 0));
-      __ SmiToInteger32(rax, rax);
+      __ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
     }
 
     if (create_implicit_receiver) {
@@ -357,6 +232,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // A Smi result indicates that the constructor of a derived class returned
+  // neither undefined nor an Object, so throw.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(rax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
   // Remove caller arguments from the stack and return.
   __ PopReturnAddressTo(rcx);
   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
@@ -371,17 +259,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -584,10 +478,8 @@
 //   o rbp: the caller's frame pointer
 //   o rsp: stack pointer (pointing to return address)
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -599,14 +491,18 @@
   __ Push(rdi);  // Callee's JS function.
   __ Push(rdx);  // Callee's new target.
 
-  // Push zero for bytecode array offset.
-  __ Push(Immediate(0));
-
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into edi (InterpreterBytecodeRegister).
   __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
+  __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+          Immediate(0));
+  __ j(not_equal, &load_debug_bytecode_array);
   __ movp(kInterpreterBytecodeArrayRegister,
           FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -616,6 +512,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push bytecode array.
+  __ Push(kInterpreterBytecodeArrayRegister);
+  // Push zero for bytecode array offset.
+  __ Push(Immediate(0));
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -647,22 +548,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-    __ j(above_equal, &ok, Label::kNear);
-    __ Push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ Pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -671,10 +559,9 @@
           Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ movp(kInterpreterBytecodeOffsetRegister,
           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ addp(kInterpreterDispatchTableRegister,
-          Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Move(
+      kInterpreterDispatchTableRegister,
+      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
 
   // Dispatch to the first bytecode handler for the function.
   __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
@@ -685,6 +572,17 @@
   // and header removal.
   __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(rbx);
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+  __ movp(kInterpreterBytecodeArrayRegister,
+          FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ jmp(&bytecode_array_loaded);
 }
 
 
@@ -742,7 +640,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rbx : the address of the first argument to be pushed. Subsequent
@@ -758,7 +657,9 @@
 
   // Call the target.
   __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -790,52 +691,25 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ Pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
-  // Drop state (we don't use these for interpreter deopts) and push PC at top
-  // of stack (to simulate initial call to bytecode handler in interpreter entry
-  // trampoline).
-  __ Pop(rbx);
-  __ Drop(1);
-  __ Push(rbx);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register and dispatch table register.
   __ movp(kInterpreterRegisterFileRegister, rbp);
   __ addp(kInterpreterRegisterFileRegister,
           Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
-  __ LoadRoot(kInterpreterDispatchTableRegister,
-              Heap::kInterpreterTableRootIndex);
-  __ addp(kInterpreterDispatchTableRegister,
-          Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Move(
+      kInterpreterDispatchTableRegister,
+      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ movp(kContextRegister,
           Operand(kInterpreterRegisterFileRegister,
                   InterpreterFrameConstants::kContextFromRegisterPointer));
 
   // Get the bytecode array pointer from the frame.
-  __ movp(rbx,
-          Operand(kInterpreterRegisterFileRegister,
-                  InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(kInterpreterBytecodeArrayRegister,
-          FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+  __ movp(
+      kInterpreterBytecodeArrayRegister,
+      Operand(kInterpreterRegisterFileRegister,
+              InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -863,6 +737,32 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use these for interpreter deopts), pop the
+  // accumulator value into the accumulator register, and push the PC on top
+  // of the stack (to simulate the initial call to the bytecode handler in the
+  // interpreter entry trampoline).
+  __ Pop(rbx);
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+  __ Push(rbx);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -877,22 +777,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1166,7 +1074,8 @@
   __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
 
   // 4b. The argArray is either null or undefined, so we tail call without any
-  // arguments to the receiver.
+  // arguments to the receiver. Since we did not create a frame for
+  // Function.prototype.apply() yet, we use a normal Call builtin here.
   __ bind(&no_arguments);
   {
     __ Set(rax, 0);
@@ -1230,6 +1139,8 @@
   }
 
   // 4. Call the callable.
+  // Since we did not create a frame for Function.prototype.call() yet,
+  // we use a normal Call builtin here.
   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
 }
 
@@ -1443,6 +1354,118 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- rax                 : number of arguments
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in rdx and the double value in xmm0.
+  __ LoadRoot(rdx, root_index);
+  __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  __ Move(rcx, rax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters done.
+    __ testp(rcx, rcx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into rbx.
+    __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into xmm1, first converting the
+    // parameter to a number via the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(rbx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Integer32ToSmi(rax, rax);
+      __ Integer32ToSmi(rcx, rcx);
+      __ Push(rax);
+      __ Push(rcx);
+      __ Push(rdx);
+      __ movp(rax, rbx);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ movp(rbx, rax);
+      __ Pop(rdx);
+      __ Pop(rcx);
+      __ Pop(rax);
+      {
+        // Restore the double accumulator value (xmm0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(rdx, &restore_smi, Label::kNear);
+        __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiToDouble(xmm0, rdx);
+        __ bind(&done_restore);
+      }
+      __ SmiToInteger32(rcx, rcx);
+      __ SmiToInteger32(rax, rax);
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    __ Movsd(xmm1, FieldOperand(rbx, HeapNumber::kValueOffset));
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiToDouble(xmm1, rbx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (xmm0) and the next parameter value on the right hand side (xmm1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+    __ Ucomisd(xmm0, xmm1);
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side.
+    __ bind(&compare_swap);
+    __ Movaps(xmm0, xmm1);
+    __ Move(rdx, rbx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
+    __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    __ Movmskpd(kScratchRegister, reg);
+    __ testl(kScratchRegister, Immediate(1));
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    __ decp(rcx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(rcx);
+  __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(rcx);
+  __ movp(rax, rdx);
+  __ Ret();
+}
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax                 : number of arguments
@@ -1542,9 +1565,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(rbx);  // the first argument
-    __ Push(rdi);  // constructor function
-    __ Push(rdx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(rax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1678,9 +1700,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(rbx);  // the first argument
-    __ Push(rdi);  // constructor function
-    __ Push(rdx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(rax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1931,9 +1952,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ movp(rbx,
-            FieldOperand(rax, JSObject::kHeaderSize +
-                                  Heap::kArgumentsLengthIndex * kPointerSize));
+    __ movp(rbx, FieldOperand(rax, JSArgumentsObject::kLengthOffset));
     __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
     __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
     __ j(not_equal, &create_runtime);
@@ -2010,10 +2029,136 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and the arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ Move(kScratchRegister, debug_is_active);
+  __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
+  __ j(not_equal, &done);
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ Cmp(Operand(rbp, StandardFrameConstants::kMarkerOffset),
+           Smi::FromInt(StackFrame::STUB));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ movp(rbp, scratch2);
+  __ SmiToInteger32(
+      scratch1, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ movp(scratch1, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movp(scratch1,
+          FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      scratch1, scratch1, SharedFunctionInfo::kFormalParameterCountOffset);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the destination address where we will put the return address
+  // after we drop current frame.
+  Register new_sp_reg = scratch2;
+  __ subp(scratch1, args_reg);
+  __ leap(new_sp_reg, Operand(rbp, scratch1, times_pointer_size,
+                              StandardFrameConstants::kCallerPCOffset));
+
+  if (FLAG_debug_code) {
+    __ cmpp(rsp, new_sp_reg);
+    __ Check(below, kStackAccessBelowStackPointer);
+  }
+
+  // Copy receiver and return address as well.
+  Register count_reg = scratch1;
+  __ leap(count_reg, Operand(args_reg, 2));
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return-address slot so it is not trashed, and let the following loop copy
+  // it to the right place.
+  Register tmp_reg = scratch3;
+  __ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+  __ movp(Operand(rsp, 0), tmp_reg);
+
+  // Restore caller's frame pointer now as it could be overwritten by
+  // the copying loop.
+  __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+  Operand src(rsp, count_reg, times_pointer_size, 0);
+  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+  // Now copy callee arguments to the caller frame going backwards to avoid
+  // callee arguments corruption (source and destination areas could overlap).
+  Label loop, entry;
+  __ jmp(&entry, Label::kNear);
+  __ bind(&loop);
+  __ decp(count_reg);
+  __ movp(tmp_reg, src);
+  __ movp(dst, tmp_reg);
+  __ bind(&entry);
+  __ cmpp(count_reg, Immediate(0));
+  __ j(not_equal, &loop, Label::kNear);
+
+  // Leave current frame.
+  __ movp(rsp, new_sp_reg);
+
+  __ bind(&done);
+}
+}  // namespace
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rdi : the function to call (checked to be a JSFunction)
@@ -2109,6 +2254,10 @@
   //  -- rsi : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
   __ LoadSharedFunctionInfoSpecialField(
       rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
   ParameterCount actual(rax);
@@ -2213,13 +2362,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rdi : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(rdi);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
   // Patch the receiver to [[BoundThis]].
   StackArgumentsAccessor args(rsp, rax);
   __ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
@@ -2238,7 +2392,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rdi : the target to call (can be any Object)
@@ -2249,14 +2404,25 @@
   __ JumpIfSmi(rdi, &non_callable);
   __ bind(&non_smi);
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
        RelocInfo::CODE_TARGET);
   __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
-  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
        RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsCallable));
+  __ j(zero, &non_callable);
+
   __ CmpInstanceType(rcx, JS_PROXY_TYPE);
   __ j(not_equal, &non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, rax, rbx, rcx, r8);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ PopReturnAddressTo(kScratchRegister);
   __ Push(rdi);
@@ -2271,16 +2437,12 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsCallable));
-  __ j(zero, &non_callable, Label::kNear);
   // Overwrite the original receiver with the (original) target.
   __ movp(args.GetReceiverOperand(), rdi);
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2473,14 +2635,11 @@
 
   // Load the next prototype.
   __ bind(&next_prototype);
-  __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, receiver_check_failed);
-  __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
   __ testq(FieldOperand(map, Map::kBitField3Offset),
-           Immediate(Map::IsHiddenPrototype::kMask));
+           Immediate(Map::HasHiddenPrototype::kMask));
   __ j(zero, receiver_check_failed);
+  __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
+  __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ jmp(&prototype_loop_start, Label::kNear);
 
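
Generate_MathMaxMin hand-rolls the ES semantics of Math.max/Math.min: NaN is
contagious, and when the operands compare equal the sign bit breaks the tie
(+0 wins for max, -0 for min, which is what the Movmskpd check implements).
The scalar logic for the max case is roughly this sketch (not V8 code):

    #include <cmath>

    double MaxWithJsSemantics(double acc, double x) {
      if (std::isnan(acc) || std::isnan(x)) return std::nan("");
      if (acc == x) return std::signbit(acc) ? x : acc;  // tie: prefer +0.
      return acc > x ? acc : x;
    }
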
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 1e14f83..f314b9c 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -487,7 +487,6 @@
   __ Cvtlsi2sd(double_exponent, exponent);
 
   // Returning or bailing out.
-  Counters* counters = isolate()->counters();
   if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
@@ -498,7 +497,6 @@
     __ bind(&done);
     __ AllocateHeapNumber(rax, rcx, &call_runtime);
     __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
-    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
   } else {
     __ bind(&call_runtime);
@@ -515,7 +513,6 @@
     __ Movsd(double_result, xmm0);
 
     __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(0);
   }
 }
@@ -537,340 +534,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The key is in rdx and the parameter count is in rax.
-  DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(rdx, &slow);
-
-  // Check if the calling frame is an arguments adaptor frame.  We look at the
-  // context offset, and if the frame is not a regular one, then we find a
-  // Smi instead of the context.  We can't use SmiCompare here, because that
-  // only works for comparing two smis.
-  Label adaptor;
-  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adaptor);
-
-  // Check index against formal parameters count limit passed in
-  // through register rax. Use unsigned comparison to get negative
-  // check for free.
-  __ cmpp(rdx, rax);
-  __ j(above_equal, &slow);
-
-  // Read the argument from the stack and return it.
-  __ SmiSub(rax, rax, rdx);
-  __ SmiToInteger32(rax, rax);
-  StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rax, args.GetArgumentOperand(0));
-  __ Ret();
-
-  // Arguments adaptor case: Check index against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmpp(rdx, rcx);
-  __ j(above_equal, &slow);
-
-  // Read the argument from the stack and return it.
-  __ SmiSub(rcx, rcx, rdx);
-  __ SmiToInteger32(rcx, rcx);
-  StackArgumentsAccessor adaptor_args(rbx, rcx,
-                                      ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rax, adaptor_args.GetArgumentOperand(0));
-  __ Ret();
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);
-  __ PushReturnAddressFrom(rbx);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // rcx : number of parameters (tagged)
-  // rdx : parameters pointer
-  // rdi : function
-  // rsp[0] : return address
-  // Registers used over the whole function:
-  //  rbx: the mapped parameter count (untagged)
-  //  rax: the allocated object (tagged).
-  Factory* factory = isolate()->factory();
-
-  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  __ SmiToInteger64(rbx, rcx);
-  // rbx = parameter count (untagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
-  __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ movp(r11, rbx);
-  __ jmp(&try_allocate, Label::kNear);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ SmiToInteger64(
-      r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ leap(rdx, Operand(rax, r11, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-
-  // rbx = parameter count (untagged)
-  // r11 = argument count (untagged)
-  // Compute the mapped parameter count = min(rbx, r11) in rbx.
-  __ cmpp(rbx, r11);
-  __ j(less_equal, &try_allocate, Label::kNear);
-  __ movp(rbx, r11);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  Label no_parameter_map;
-  __ xorp(r8, r8);
-  __ testp(rbx, rbx);
-  __ j(zero, &no_parameter_map, Label::kNear);
-  __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ bind(&no_parameter_map);
-
-  // 2. Backing store.
-  __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
-
-  // rax = address of new object(s) (tagged)
-  // r11 = argument count (untagged)
-  // Get the arguments map from the current native context into r9.
-  Label has_mapped_parameters, instantiate;
-  __ movp(r9, NativeContextOperand());
-  __ testp(rbx, rbx);
-  __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
-  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
-  __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
-  __ jmp(&instantiate, Label::kNear);
-
-  const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
-  __ bind(&has_mapped_parameters);
-  __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
-  __ bind(&instantiate);
-
-  // rax = address of new object (tagged)
-  // rbx = mapped parameter count (untagged)
-  // r11 = argument count (untagged)
-  // r9 = address of arguments map (tagged)
-  __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
-  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ AssertNotSmi(rdi);
-  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
-                                Heap::kArgumentsCalleeIndex * kPointerSize),
-          rdi);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  // Note: r11 is tagged from here on.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ Integer32ToSmi(r11, r11);
-  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
-                                Heap::kArgumentsLengthIndex * kPointerSize),
-          r11);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, rdi will point there, otherwise to the
-  // backing store.
-  __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
-  // rax = address of new object (tagged)
-  // rbx = mapped parameter count (untagged)
-  // r11 = argument count (tagged)
-  // rdi = address of parameter map or backing store (tagged)
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ testp(rbx, rbx);
-  __ j(zero, &skip_parameter_map);
-
-  __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
-  // rbx contains the untagged argument count. Add 2 and tag to write.
-  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-  __ Integer64PlusConstantToSmi(r9, rbx, 2);
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
-  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
-  __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-
-  // Load tagged parameter count into r9.
-  __ Integer32ToSmi(r9, rbx);
-  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addp(r8, rcx);
-  __ subp(r8, r9);
-  __ movp(rcx, rdi);
-  __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ SmiToInteger64(r9, r9);
-  // r9 = loop variable (untagged)
-  // r8 = mapping index (tagged)
-  // rcx = address of parameter map (tagged)
-  // rdi = address of backing store (tagged)
-  __ jmp(&parameters_test, Label::kNear);
-
-  __ bind(&parameters_loop);
-  __ subp(r9, Immediate(1));
-  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-  __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
-          r8);
-  __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
-          kScratchRegister);
-  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
-  __ bind(&parameters_test);
-  __ testp(r9, r9);
-  __ j(not_zero, &parameters_loop, Label::kNear);
-
-  __ bind(&skip_parameter_map);
-
-  // r11 = argument count (tagged)
-  // rdi = address of backing store (tagged)
-  // Copy arguments header and remaining slots (if there are any).
-  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
-          factory->fixed_array_map());
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
-
-  Label arguments_loop, arguments_test;
-  __ movp(r8, rbx);
-  // Untag r11 for the loop below.
-  __ SmiToInteger64(r11, r11);
-  __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
-  __ subp(rdx, kScratchRegister);
-  __ jmp(&arguments_test, Label::kNear);
-
-  __ bind(&arguments_loop);
-  __ subp(rdx, Immediate(kPointerSize));
-  __ movp(r9, Operand(rdx, 0));
-  __ movp(FieldOperand(rdi, r8,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          r9);
-  __ addp(r8, Immediate(1));
-
-  __ bind(&arguments_test);
-  __ cmpp(r8, r11);
-  __ j(less, &arguments_loop, Label::kNear);
-
-  // Return.
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  // r11 = argument count (untagged)
-  __ bind(&runtime);
-  __ Integer32ToSmi(r11, r11);
-  __ PopReturnAddressTo(rax);
-  __ Push(rdi);  // Push function.
-  __ Push(rdx);  // Push parameters pointer.
-  __ Push(r11);  // Push parameter count.
-  __ PushReturnAddressFrom(rax);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // rcx : number of parameters (tagged)
-  // rdx : parameters pointer
-  // rdi : function
-  // rsp[0] : return address
-
-  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToInteger64(rax, rcx);
-  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ PopReturnAddressTo(rax);
-  __ Push(rdi);  // Push function.
-  __ Push(rdx);  // Push parameters pointer.
-  __ Push(rcx);  // Push parameter count.
-  __ PushReturnAddressFrom(rax);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // rcx : number of parameters (tagged)
-  // rdx : parameters pointer
-  // rbx : rest parameter index (tagged)
-  // rsp[0] : return address
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ movp(r8, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rax, Operand(r8, StandardFrameConstants::kContextOffset));
-  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rcx, Operand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToInteger64(rax, rcx);
-  __ leap(rdx, Operand(r8, rax, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ PopReturnAddressTo(rax);
-  __ Push(rcx);  // Push number of parameters.
-  __ Push(rdx);  // Push parameters pointer.
-  __ Push(rbx);  // Push rest parameter index.
-  __ PushReturnAddressFrom(rax);
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
   // Return address is on the stack.
   Label slow;
@@ -932,103 +595,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // rcx : number of parameters (tagged)
-  // rdx : parameters pointer
-  // rdi : function
-  // rsp[0] : return address
-
-  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
-  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adaptor_frame);
-
-  // Get the length from the frame.
-  __ SmiToInteger64(rax, rcx);
-  __ jmp(&try_allocate);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToInteger64(rax, rcx);
-  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size of
-  // the arguments object and the elements array.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ testp(rax, rax);
-  __ j(zero, &add_arguments_object, Label::kNear);
-  __ leap(rax, Operand(rax, times_pointer_size, FixedArray::kHeaderSize));
-  __ bind(&add_arguments_object);
-  __ addp(rax, Immediate(Heap::kStrictArgumentsObjectSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
-
-  // Get the arguments map from the current native context.
-  __ movp(rdi, NativeContextOperand());
-  __ movp(rdi, ContextOperand(rdi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
-  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
-  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
-                       Heap::kArgumentsLengthIndex * kPointerSize),
-          rcx);
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ testp(rcx, rcx);
-  __ j(zero, &done);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
-  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-
-  // Untag the length for the loop below.
-  __ SmiToInteger64(rcx, rcx);
-
-  // Copy the fixed array slots.
-  Label loop;
-  __ bind(&loop);
-  __ movp(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
-  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
-  __ addp(rdi, Immediate(kPointerSize));
-  __ subp(rdx, Immediate(kPointerSize));
-  __ decp(rcx);
-  __ j(not_zero, &loop);
-
-  // Return.
-  __ bind(&done);
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ PopReturnAddressTo(rax);
-  __ Push(rdi);  // Push function.
-  __ Push(rdx);  // Push parameters pointer.
-  __ Push(rcx);  // Push parameter count.
-  __ PushReturnAddressFrom(rax);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1101,35 +667,34 @@
   __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
   __ JumpIfSmi(rdi, &runtime);
   __ movp(r15, rdi);  // Make a copy of the original subject string.
-  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // rax: RegExp data (FixedArray)
   // rdi: subject string
   // r15: subject string
   // Handle subject string according to its encoding and representation:
   // (1) Sequential two byte?  If yes, go to (9).
-  // (2) Sequential one byte?  If yes, go to (6).
-  // (3) Anything but sequential or cons?  If yes, go to (7).
-  // (4) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (2) Sequential one byte?  If yes, go to (5).
+  // (3) Sequential or cons?  If not, go to (6).
+  // (4) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (5) One byte sequential.  Load regexp code for one byte.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (7) Not a long external string?  If yes, go to (10).
-  // (8) External string.  Make it, offset-wise, look like a sequential string.
-  // (8a) Is the external string one byte?  If yes, go to (6).
-  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
+  // (6) Long external string?  If not, go to (10).
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (8) Is the external string one byte?  If yes, go to (5).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (5a).
+  // (11) Sliced string.  Replace subject with parent. Go to (1).
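+  // Example: a flat cons string wrapping a one-byte sequential string takes
+  // (1) -> (2) -> (3) -> (4), is replaced by its first part, and re-enters at
+  // (1) to finish via (5).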
 
-  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
-        external_string /* 8 */, check_underlying /* 5a */,
-        not_seq_nor_cons /* 7 */, check_code /* E */,
-        not_long_external /* 10 */;
+  Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
+      external_string /* 7 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 6 */, check_code /* E */, not_long_external /* 10 */;
+
+  __ bind(&check_underlying);
+  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
 
   // (1) Sequential two byte?  If yes, go to (9).
   __ andb(rbx, Immediate(kIsNotStringMask |
@@ -1139,14 +704,14 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);  // Go to (9).
 
-  // (2) Sequential one byte?  If yes, go to (6).
+  // (2) Sequential one byte?  If yes, go to (5).
   // Any other sequential string must be one byte.
   __ andb(rbx, Immediate(kIsNotStringMask |
                          kStringRepresentationMask |
                          kShortExternalStringMask));
-  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).
+  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (5).
 
-  // (3) Anything but sequential or cons?  If yes, go to (7).
+  // (3) Sequential or cons?  If not, go to (6).
   // We check whether the subject string is a cons, since sequential strings
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
@@ -1154,7 +719,7 @@
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmpp(rbx, Immediate(kExternalStringTag));
-  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).
+  __ j(greater_equal, &not_seq_nor_cons);  // Go to (6).
 
   // (4) Cons string.  Check that it's flat.
   // Replace subject with first string and reload instance type.
@@ -1162,22 +727,9 @@
                  Heap::kempty_stringRootIndex);
   __ j(not_equal, &runtime);
   __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
-  __ bind(&check_underlying);
-  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  __ jmp(&check_underlying);
 
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
-  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);  // Go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  __ testb(rbx, Immediate(kStringRepresentationMask));
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ j(not_zero, &external_string);  // Go to (8)
-
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (5) One byte sequential.  Load regexp code for one byte.
   __ bind(&seq_one_byte_string);
   // rax: RegExp data (FixedArray)
   __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
@@ -1424,12 +976,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (7) Not a long external string?  If yes, go to (10).
+  // (6) Long external string?  If not, go to (10).
   __ bind(&not_seq_nor_cons);
   // Compare flags are still set from (3).
   __ j(greater, &not_long_external, Label::kNear);  // Go to (10).
 
-  // (8) External string.  Short external strings have been ruled out.
+  // (7) External string.  Short external strings have been ruled out.
   __ bind(&external_string);
   __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -1444,13 +996,13 @@
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // (8a) Is the external string one byte?  If yes, go to (6).
+  // (8) Is the external string one byte?  If yes, go to (5).
   __ testb(rbx, Immediate(kStringEncodingMask));
-  __ j(not_zero, &seq_one_byte_string);  // Goto (6).
+  __ j(not_zero, &seq_one_byte_string);  // Go to (5).
 
   // rdi: subject string (flat two-byte)
   // rax: RegExp data (FixedArray)
-  // (9) Two byte sequential.  Load regexp code for one byte.  Go to (E).
+  // (9) Two byte sequential.  Load regexp code for two byte.  Go to (E).
   __ bind(&seq_two_byte_string);
   __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
   __ Set(rcx, 0);  // Type is two byte.
@@ -1463,7 +1015,7 @@
   __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent. Go to (5a).
+  // (11) Sliced string.  Replace subject with parent. Go to (1).
   // Load offset into r14 and replace subject string with parent.
   __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
   __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
@@ -1545,16 +1097,11 @@
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
       __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-      if (is_strong(strength())) {
-        // In strong mode, this comparison must throw, so call the runtime.
-        __ j(equal, &runtime_call, Label::kFar);
-      } else {
-        Label check_for_nan;
-        __ j(not_equal, &check_for_nan, Label::kNear);
-        __ Set(rax, NegativeComparisonResult(cc));
-        __ ret(0);
-        __ bind(&check_for_nan);
-      }
+      Label check_for_nan;
+      __ j(not_equal, &check_for_nan, Label::kNear);
+      __ Set(rax, NegativeComparisonResult(cc));
+      __ ret(0);
+      __ bind(&check_for_nan);
     }
 
     // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
@@ -1576,12 +1123,6 @@
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
       __ j(equal, &runtime_call, Label::kFar);
-      if (is_strong(strength())) {
-        // We have already tested for smis and heap numbers, so if both
-        // arguments are not strings we must proceed to the slow case.
-        __ testb(rcx, Immediate(kIsNotStringMask));
-        __ j(not_zero, &runtime_call, Label::kFar);
-      }
     }
     __ Set(rax, EQUAL);
     __ ret(0);
@@ -1728,7 +1269,7 @@
     // Not strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal;
+    Label return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1737,40 +1278,54 @@
     __ leap(rcx, Operand(rax, rdx, times_1, 0));
     __ testb(rcx, Immediate(kSmiTagMask));
     __ j(not_zero, &runtime_call, Label::kNear);
-    __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
-    __ j(below, &runtime_call, Label::kNear);
-    __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
-    __ j(below, &runtime_call, Label::kNear);
+
+    __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+    __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal, Label::kNear);
+    __ j(not_zero, &undetectable);
     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
-    __ j(zero, &return_unequal, Label::kNear);
-    // The objects are both undetectable, so they both compare as the value
-    // undefined, and are equal.
-    __ Set(rax, EQUAL);
+    __ j(not_zero, &return_unequal);
+
+    __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+    __ j(below, &runtime_call, Label::kNear);
+    __ CmpInstanceType(rcx, FIRST_JS_RECEIVER_TYPE);
+    __ j(below, &runtime_call, Label::kNear);
+
     __ bind(&return_unequal);
-    // Return non-equal by returning the non-zero object pointer in rax,
-    // or return equal if we fell through to here.
+    // Return non-equal by returning the non-zero object pointer in rax.
+    __ ret(0);
+
+    __ bind(&undetectable);
+    __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(zero, &return_unequal);
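+    // Both operands are undetectable, so they both compare as the value
+    // undefined and are equal.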
+    __ Set(rax, EQUAL);
     __ ret(0);
   }
   __ bind(&runtime_call);
 
-  // Push arguments below the return address to prepare jump to builtin.
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdx);
-  __ Push(rax);
-
-  // Figure out which native to call and setup the arguments.
   if (cc == equal) {
-    __ PushReturnAddressFrom(rcx);
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(rdx);
+      __ Push(rax);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
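+    // The runtime call returns the canonical true/false heap objects in rax,
+    // so subtracting the true root leaves 0 (== EQUAL) exactly when the
+    // result was true.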
+    STATIC_ASSERT(EQUAL == 0);
+    __ LoadRoot(rdx, Heap::kTrueValueRootIndex);
+    __ subp(rax, rdx);
+    __ Ret();
   } else {
+    // Push arguments below the return address to prepare jump to builtin.
+    __ PopReturnAddressTo(rcx);
+    __ Push(rdx);
+    __ Push(rax);
     __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
     __ PushReturnAddressFrom(rcx);
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ bind(&miss);
@@ -2002,7 +1557,8 @@
 
   __ bind(&call_function);
   __ Set(rax, argc);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -2040,7 +1596,7 @@
 
   __ bind(&call);
   __ Set(rax, argc);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -2158,12 +1714,34 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // Enter the exit frame that transitions from JavaScript to C++.
 #ifdef _WIN64
-  int arg_stack_space = (result_size() < 2 ? 2 : 4);
-#else   // _WIN64
-  int arg_stack_space = 0;
+  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
+  // stack to be aligned to 16 bytes. It only allows a single word to be
+  // returned in register rax. Larger results must be written to an address
+  // passed as a hidden first argument.
+  const Register kCCallArg0 = rcx;
+  const Register kCCallArg1 = rdx;
+  const Register kCCallArg2 = r8;
+  const Register kCCallArg3 = r9;
+  const int kArgExtraStackSpace = 2;
+  const int kMaxRegisterResultSize = 1;
+#else
+  // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
+  // are returned in rax, and a struct of two pointers is returned in rax+rdx.
+  // Larger results must be written to an address passed as a hidden first
+  // argument.
+  const Register kCCallArg0 = rdi;
+  const Register kCCallArg1 = rsi;
+  const Register kCCallArg2 = rdx;
+  const Register kCCallArg3 = rcx;
+  const int kArgExtraStackSpace = 0;
+  const int kMaxRegisterResultSize = 2;
 #endif  // _WIN64
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  int arg_stack_space =
+      kArgExtraStackSpace +
+      (result_size() <= kMaxRegisterResultSize ? 0 : result_size());
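+  // For example, a result_size() of 2 needs 2 + 2 = 4 words on Win64 (the two
+  // Arguments object slots plus two result words), but 0 words on System V,
+  // where such a result still fits into the rax:rdx register pair.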
   if (argv_in_register()) {
     DCHECK(!save_doubles());
     __ EnterApiExitFrame(arg_stack_space);
@@ -2179,56 +1757,41 @@
   // r14: number of arguments including receiver (C callee-saved).
   // r15: argv pointer (C callee-saved).
 
-  // Simple results returned in rax (both AMD64 and Win64 calling conventions).
-  // Complex results must be written to address passed as first argument.
-  // AMD64 calling convention: a struct of two pointers in rax+rdx
-
   // Check stack alignment.
   if (FLAG_debug_code) {
     __ CheckStackAlignment();
   }
 
-  // Call C function.
-#ifdef _WIN64
-  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
-  // Pass argv and argc as two parameters. The arguments object will
-  // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
-  if (result_size() < 2) {
+  // Call C function. The arguments object will be created by stubs declared by
+  // DECLARE_RUNTIME_FUNCTION().
+  if (result_size() <= kMaxRegisterResultSize) {
     // Pass a pointer to the Arguments object as the first argument.
-    // Return result in single register (rax).
-    __ movp(rcx, r14);  // argc.
-    __ movp(rdx, r15);  // argv.
-    __ Move(r8, ExternalReference::isolate_address(isolate()));
+    // Return result in a single register (rax) or a register pair (rax, rdx).
+    __ movp(kCCallArg0, r14);  // argc.
+    __ movp(kCCallArg1, r15);  // argv.
+    __ Move(kCCallArg2, ExternalReference::isolate_address(isolate()));
   } else {
-    DCHECK_EQ(2, result_size());
+    DCHECK_LE(result_size(), 3);
     // Pass a pointer to the result location as the first argument.
-    __ leap(rcx, StackSpaceOperand(2));
+    __ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
     // Pass a pointer to the Arguments object as the second argument.
-    __ movp(rdx, r14);  // argc.
-    __ movp(r8, r15);   // argv.
-    __ Move(r9, ExternalReference::isolate_address(isolate()));
+    __ movp(kCCallArg1, r14);  // argc.
+    __ movp(kCCallArg2, r15);  // argv.
+    __ Move(kCCallArg3, ExternalReference::isolate_address(isolate()));
   }
-
-#else  // _WIN64
-  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
-  __ movp(rdi, r14);  // argc.
-  __ movp(rsi, r15);  // argv.
-  __ Move(rdx, ExternalReference::isolate_address(isolate()));
-#endif  // _WIN64
   __ call(rbx);
-  // Result is in rax - do not destroy this register!
 
-#ifdef _WIN64
-  // If return value is on the stack, pop it to registers.
-  if (result_size() > 1) {
-    DCHECK_EQ(2, result_size());
+  if (result_size() > kMaxRegisterResultSize) {
     // Read result values stored on stack. Result is stored
-    // above the four argument mirror slots and the two
-    // Arguments object slots.
-    __ movq(rax, Operand(rsp, 6 * kRegisterSize));
-    __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
+    // above the two Arguments object slots on Win64.
+    DCHECK_LE(result_size(), 3);
+    __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
+    __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
+    if (result_size() > 2) {
+      __ movq(kReturnRegister2, StackSpaceOperand(kArgExtraStackSpace + 2));
+    }
   }
-#endif  // _WIN64
+  // Result is in rax, rdx:rax or r8:rdx:rax - do not destroy these registers!
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -3068,6 +2631,42 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in rax.
+  Label is_number;
+  __ JumpIfSmi(rax, &is_number, Label::kNear);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CmpObjectType(rax, LAST_NAME_TYPE, rdi);
+  // rax: receiver
+  // rdi: receiver map
+  __ j(above, &not_name, Label::kNear);
+  __ Ret();
+  __ bind(&not_name);
+
+  Label not_heap_number;
+  __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpInstanceType(rdi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ PopReturnAddressTo(rcx);     // Pop return address.
+  __ Push(rax);                   // Push argument.
+  __ PushReturnAddressFrom(rcx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToName);
+}
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3283,21 +2882,17 @@
   __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
   __ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
-      __ AssertSmi(rax);
-      __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
-      __ AssertSmi(rdx);
-      __ pushq(rax);
-      __ movq(rax, rdx);
-      __ popq(rdx);
-    }
-    __ subp(rax, rdx);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
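+    // For relational ops, replace each boolean with its cached Smi number
+    // (false -> 0, true -> 1) and swap rax/rdx so that the shared subtraction
+    // below yields a correctly signed (left minus right) result.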
+    __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
+    __ AssertSmi(rax);
+    __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
+    __ AssertSmi(rdx);
+    __ pushq(rax);
+    __ movq(rax, rdx);
+    __ popq(rdx);
   }
+  __ subp(rax, rdx);
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3380,7 +2975,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3614,8 +3209,6 @@
   if (Token::IsEqualityOp(op())) {
     __ subp(rax, rdx);
     __ ret(0);
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     __ PopReturnAddressTo(rcx);
     __ Push(rdx);
@@ -3913,11 +3506,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     not_zero,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -4867,6 +4457,626 @@
 }
 
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdi    : target
+  //  -- rdx    : new target
+  //  -- rsi    : context
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(rdi);
+  __ AssertReceiver(rdx);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ movp(rcx, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(rcx, &new_object);
+  __ CmpObjectType(rcx, MAP_TYPE, rbx);
+  __ j(not_equal, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ cmpp(rdi, FieldOperand(rcx, Map::kConstructorOrBackPointerOffset));
+  __ j(not_equal, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
+  __ leal(rbx, Operand(rbx, times_pointer_size, 0));
+  __ Allocate(rbx, rax, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ movp(Operand(rax, JSObject::kMapOffset), rcx);
+  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+  __ movp(Operand(rax, JSObject::kPropertiesOffset), rbx);
+  __ movp(Operand(rax, JSObject::kElementsOffset), rbx);
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ leap(rbx, Operand(rax, JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- rax    : result (untagged)
+  //  -- rbx    : result fields (untagged)
+  //  -- rdi    : result end (untagged)
+  //  -- rcx    : initial map
+  //  -- rsi    : context
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
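+  // While the map's construction counter (in bit field 3) is still running,
+  // reserved in-object fields are filled with one-pointer filler maps rather
+  // than undefined, so that Runtime::kFinalizeInstanceSize can shrink the
+  // instance later on.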
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+  __ testl(FieldOperand(rcx, Map::kBitField3Offset),
+           Immediate(Map::ConstructionCounter::kMask));
+  __ j(not_zero, &slack_tracking, Label::kNear);
+  {
+    // Initialize all in-object fields with undefined.
+    __ InitializeFieldsWithFiller(rbx, rdi, r11);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
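+    // rax still holds the untagged allocation address; since the tag is 1,
+    // incrementing the pointer sets the heap-object tag bit.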
+    __ incp(rax);
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ subl(FieldOperand(rcx, Map::kBitField3Offset),
+            Immediate(1 << Map::ConstructionCounter::kShift));
+
+    // Initialize the in-object fields with undefined.
+    __ movzxbl(rdx, FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset));
+    __ negp(rdx);
+    __ leap(rdx, Operand(rdi, rdx, times_pointer_size, 0));
+    __ InitializeFieldsWithFiller(rbx, rdx, r11);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(rdx, rdi, r11);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ incp(rax);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ testl(FieldOperand(rcx, Map::kBitField3Offset),
+             Immediate(Map::ConstructionCounter::kMask));
+    __ j(zero, &finalize, Label::kNear);
+    __ Ret();
+
+    // Finalize the instance size.
+    __ bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(rax);
+      __ Push(rcx);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(rax);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Integer32ToSmi(rbx, rbx);
+    __ Push(rcx);
+    __ Push(rbx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(rcx);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
+  __ decp(rax);
+  __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
+  __ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
+  __ jmp(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ PopReturnAddressTo(rcx);
+  __ Push(rdi);
+  __ Push(rdx);
+  __ PushReturnAddressFrom(rcx);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdi    : function
+  //  -- rsi    : context
+  //  -- rbp    : frame pointer
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(rdi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make rdx point to that
+  // JavaScript frame.
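+  // JavaScript frames store the function in the standard marker slot, while
+  // handler/stub frames hold a Smi marker there, so the comparison below only
+  // matches the function's own frame.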
+  {
+    Label loop, loop_entry;
+    __ movp(rdx, rbp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
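+  // E.g. 5 actual arguments against 3 formal parameters leave rax == 2 rest
+  // parameters to collect below.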
+  Label rest_parameters;
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+  __ SmiToInteger32(
+      rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ subl(rax, rcx);
+  __ j(greater, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- rsi    : context
+    //  -- rsp[0] : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in rax.
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
+    __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
+    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+    __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
+    __ movp(FieldOperand(rax, JSArray::kElementsOffset), rcx);
+    __ movp(FieldOperand(rax, JSArray::kLengthOffset), Immediate(0));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ leap(rbx, Operand(rbx, rax, times_pointer_size,
+                         StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+
+    // ----------- S t a t e -------------
+    //  -- rsi    : context
+    //  -- rax    : number of rest parameters
+    //  -- rbx    : pointer to the first rest parameter
+    //  -- rsp[0] : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ leal(rcx, Operand(rax, times_pointer_size,
+                         JSArray::kSize + FixedArray::kHeaderSize));
+    __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Compute the arguments.length in rdi.
+    __ Integer32ToSmi(rdi, rax);
+
+    // Setup the elements array in rdx.
+    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+    __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
+    __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
+    {
+      Label loop, done_loop;
+      __ Set(rcx, 0);
+      __ bind(&loop);
+      __ cmpl(rcx, rax);
+      __ j(equal, &done_loop, Label::kNear);
+      __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
+      __ movp(
+          FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          kScratchRegister);
+      __ subp(rbx, Immediate(1 * kPointerSize));
+      __ addl(rcx, Immediate(1));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Setup the rest parameter array in rax.
+    __ leap(rax,
+            Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
+    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
+    __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
+    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+    __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
+    __ movp(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+    __ movp(FieldOperand(rax, JSArray::kLengthOffset), rdi);
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Integer32ToSmi(rax, rax);
+      __ Integer32ToSmi(rcx, rcx);
+      __ Push(rax);
+      __ Push(rbx);
+      __ Push(rcx);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ movp(rdx, rax);
+      __ Pop(rbx);
+      __ Pop(rax);
+      __ SmiToInteger32(rax, rax);
+    }
+    __ jmp(&done_allocate);
+  }
+}
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdi    : function
+  //  -- rsi    : context
+  //  -- rbp    : frame pointer
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(rdi);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadSharedFunctionInfoSpecialField(
+      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+  __ leap(rdx, Operand(rbp, rcx, times_pointer_size,
+                       StandardFrameConstants::kCallerSPOffset));
+  __ Integer32ToSmi(rcx, rcx);
+
+  // rcx : number of parameters (tagged)
+  // rdx : parameters pointer
+  // rdi : function
+  // rsp[0] : return address
+  // Registers used over the whole function:
+  //  rbx: the mapped parameter count (untagged)
+  //  rax: the allocated object (tagged).
+  Factory* factory = isolate()->factory();
+
+  __ SmiToInteger64(rbx, rcx);
+  // rbx = parameter count (untagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+  __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(equal, &adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ movp(r11, rbx);
+  __ jmp(&try_allocate, Label::kNear);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ SmiToInteger64(
+      r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ leap(rdx, Operand(rax, r11, times_pointer_size,
+                       StandardFrameConstants::kCallerSPOffset));
+
+  // rbx = parameter count (untagged)
+  // r11 = argument count (untagged)
+  // Compute the mapped parameter count = min(rbx, r11) in rbx.
+  __ cmpp(rbx, r11);
+  __ j(less_equal, &try_allocate, Label::kNear);
+  __ movp(rbx, r11);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map: has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  Label no_parameter_map;
+  __ xorp(r8, r8);
+  __ testp(rbx, rbx);
+  __ j(zero, &no_parameter_map, Label::kNear);
+  __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ bind(&no_parameter_map);
+
+  // 2. Backing store.
+  __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
+
+  // rax = address of new object(s) (tagged)
+  // r11 = argument count (untagged)
+  // Get the arguments map from the current native context into r9.
+  Label has_mapped_parameters, instantiate;
+  __ movp(r9, NativeContextOperand());
+  __ testp(rbx, rbx);
+  __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
+  __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
+  __ jmp(&instantiate, Label::kNear);
+
+  const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
+  __ bind(&has_mapped_parameters);
+  __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
+  __ bind(&instantiate);
+
+  // rax = address of new object (tagged)
+  // rbx = mapped parameter count (untagged)
+  // r11 = argument count (untagged)
+  // r9 = address of arguments map (tagged)
+  __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
+  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
+  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
+
+  // Set up the callee in-object property.
+  __ AssertNotSmi(rdi);
+  __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kCalleeOffset), rdi);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  // Note: r11 is tagged from here on.
+  __ Integer32ToSmi(r11, r11);
+  __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kLengthOffset), r11);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, rdi will point there, otherwise to the
+  // backing store.
+  __ leap(rdi, Operand(rax, JSSloppyArgumentsObject::kSize));
+  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+  // rax = address of new object (tagged)
+  // rbx = mapped parameter count (untagged)
+  // r11 = argument count (tagged)
+  // rdi = address of parameter map or backing store (tagged)
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ testp(rbx, rbx);
+  __ j(zero, &skip_parameter_map);
+
+  __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
+  // rbx contains the untagged argument count. Add 2 and tag to write.
+  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+  __ Integer64PlusConstantToSmi(r9, rbx, 2);
+  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+  __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
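+  // E.g. assuming MIN_CONTEXT_SLOTS == 4, parameter_count == 3 and
+  // mapped_parameter_count == 2, the mapped parameters receive context
+  // indices 6 and 5.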
+  Label parameters_loop, parameters_test;
+
+  // Load tagged parameter count into r9.
+  __ Integer32ToSmi(r9, rbx);
+  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+  __ addp(r8, rcx);
+  __ subp(r8, r9);
+  __ movp(rcx, rdi);
+  __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ SmiToInteger64(r9, r9);
+  // r9 = loop variable (untagged)
+  // r8 = mapping index (tagged)
+  // rcx = address of parameter map (tagged)
+  // rdi = address of backing store (tagged)
+  __ jmp(&parameters_test, Label::kNear);
+
+  __ bind(&parameters_loop);
+  __ subp(r9, Immediate(1));
+  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+  __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
+          r8);
+  __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
+          kScratchRegister);
+  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+  __ bind(&parameters_test);
+  __ testp(r9, r9);
+  __ j(not_zero, &parameters_loop, Label::kNear);
+
+  __ bind(&skip_parameter_map);
+
+  // r11 = argument count (tagged)
+  // rdi = address of backing store (tagged)
+  // Copy arguments header and remaining slots (if there are any).
+  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+          factory->fixed_array_map());
+  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
+
+  Label arguments_loop, arguments_test;
+  __ movp(r8, rbx);
+  // Untag r11 for the loop below.
+  __ SmiToInteger64(r11, r11);
+  __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+  __ subp(rdx, kScratchRegister);
+  __ jmp(&arguments_test, Label::kNear);
+
+  __ bind(&arguments_loop);
+  __ subp(rdx, Immediate(kPointerSize));
+  __ movp(r9, Operand(rdx, 0));
+  __ movp(FieldOperand(rdi, r8,
+                       times_pointer_size,
+                       FixedArray::kHeaderSize),
+          r9);
+  __ addp(r8, Immediate(1));
+
+  __ bind(&arguments_test);
+  __ cmpp(r8, r11);
+  __ j(less, &arguments_loop, Label::kNear);
+
+  // Return.
+  __ ret(0);
+
+  // Do the runtime call to allocate the arguments object.
+  // r11 = argument count (untagged)
+  __ bind(&runtime);
+  __ Integer32ToSmi(r11, r11);
+  __ PopReturnAddressTo(rax);
+  __ Push(rdi);  // Push function.
+  __ Push(rdx);  // Push parameters pointer.
+  __ Push(r11);  // Push parameter count.
+  __ PushReturnAddressFrom(rax);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rdi    : function
+  //  -- rsi    : context
+  //  -- rbp    : frame pointer
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(rdi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make rdx point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ movp(rdx, rbp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(equal, &arguments_adaptor, Label::kNear);
+  {
+    __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadSharedFunctionInfoSpecialField(
+        rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+    __ leap(rbx, Operand(rdx, rax, times_pointer_size,
+                         StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+  }
+  __ jmp(&arguments_done, Label::kNear);
+  __ bind(&arguments_adaptor);
+  {
+    __ SmiToInteger32(
+        rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ leap(rbx, Operand(rbx, rax, times_pointer_size,
+                         StandardFrameConstants::kCallerSPOffset -
+                             1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- rax    : number of arguments
+  //  -- rbx    : pointer to the first argument
+  //  -- rsi    : context
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
+                                                    FixedArray::kHeaderSize));
+  __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Compute the arguments.length in rdi.
+  __ Integer32ToSmi(rdi, rax);
+
+  // Setup the elements array in rdx.
+  __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+  __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
+  __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
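+  // The first argument sits at the highest stack address, so rbx walks down
+  // the stack while rcx indexes the elements array front to back.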
+  {
+    Label loop, done_loop;
+    __ Set(rcx, 0);
+    __ bind(&loop);
+    __ cmpl(rcx, rax);
+    __ j(equal, &done_loop, Label::kNear);
+    __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
+    __ movp(
+        FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+        kScratchRegister);
+    __ subp(rbx, Immediate(1 * kPointerSize));
+    __ addl(rcx, Immediate(1));
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Set up the strict arguments object in rax.
+  __ leap(rax,
+          Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, rcx);
+  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kMapOffset), rcx);
+  __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kPropertiesOffset), rcx);
+  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kElementsOffset), rdx);
+  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kLengthOffset), rdi);
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Integer32ToSmi(rax, rax);
+    __ Integer32ToSmi(rcx, rcx);
+    __ Push(rax);
+    __ Push(rbx);
+    __ Push(rcx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ movp(rdx, rax);
+    __ Pop(rbx);
+    __ Pop(rax);
+    __ SmiToInteger32(rax, rax);
+  }
+  __ jmp(&done_allocate);
+}
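The single allocation above is carved up with the FixedArray backing store at offset 0 and the JSStrictArgumentsObject immediately after it. A minimal arithmetic sketch, assuming x64 constants (editorial, not V8 source):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPointerSize = 8;                          // x64
  const size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  const size_t kStrictArgumentsSize = 4 * kPointerSize;   // see STATIC_ASSERT
  const size_t argc = 3;                                  // example count
  const size_t elements_bytes = kFixedArrayHeaderSize + argc * kPointerSize;
  const size_t total = elements_bytes + kStrictArgumentsSize;
  printf("elements: %zu bytes, object at +%zu, total %zu bytes\n",
         elements_bytes, elements_bytes, total);
  return 0;
}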
+
+
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = rsi;
   Register slot_reg = rbx;
@@ -5205,11 +5415,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- rdi                 : callee
   //  -- rbx                 : call_data
@@ -5272,8 +5481,10 @@
   // Push return address back on stack.
   __ PushReturnAddressFrom(return_address);
 
-  // load context from callee
-  __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // Load the context from the callee.
+    __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+  }
 
   // Allocate the v8::Arguments structure in the arguments' space since
   // it's not controlled by GC.
@@ -5346,7 +5557,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5354,18 +5565,19 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- rsp[0]                  : return address
-  //  -- rsp[8]                  : name
-  //  -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
+  //  -- rsp[0]                          : return address
+  //  -- rsp[8]                          : name
+  //  -- rsp[16 .. (16 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- r8                    : api_function_address
+  //  -- r8                              : api_function_address
   // -----------------------------------
 
 #if defined(__MINGW64__) || defined(_WIN64)
@@ -5381,23 +5593,25 @@
   DCHECK(api_function_address.is(r8));
   Register scratch = rax;
 
-  // v8::Arguments::values_ and handler for name.
-  const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
 
-  // Allocate v8::AccessorInfo in non-GCed stack space.
+  // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
   const int kArgStackSpace = 1;
 
-  __ leap(name_arg, Operand(rsp, kPCOnStackSize));
+  // Load the address of the v8::PropertyCallbackInfo::args_ array.
+  __ leap(scratch, Operand(rsp, 2 * kPointerSize));
 
   PrepareCallApiFunction(masm, kArgStackSpace);
-  __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+  Operand info_object = StackSpaceOperand(0);
+  __ movp(info_object, scratch);
 
-  // v8::PropertyAccessorInfo::args_.
-  __ movp(StackSpaceOperand(0), scratch);
-
+  __ leap(name_arg, Operand(scratch, -kPointerSize));
   // The context register (rsi) has been saved in PrepareCallApiFunction and
   // could be used to pass arguments.
-  __ leap(accessor_info_arg, StackSpaceOperand(0));
+  __ leap(accessor_info_arg, info_object);
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
@@ -5407,13 +5621,12 @@
   DCHECK(!api_function_address.is(accessor_info_arg) &&
          !api_function_address.is(name_arg));
 
-  // The name handler is counted as an argument.
-  StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
-  Operand return_value_operand = args.GetArgumentOperand(
-      PropertyCallbackArguments::kArgsLength - 1 -
-      PropertyCallbackArguments::kReturnValueOffset);
+  // The +3 skips the prolog, the return address and the name handle.
+  Operand return_value_operand(
+      rbp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
-                           kStackSpace, nullptr, return_value_operand, NULL);
+                           kStackUnwindSpace, nullptr, return_value_operand,
+                           NULL);
 }
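A sketch of the rbp-relative arithmetic behind return_value_operand (editorial; the kReturnValueOffset value is an illustrative stand-in, not the real PropertyCallbackArguments constant): slot 0 holds the saved rbp, slot 1 the return address, slot 2 the name handle, so the args_ array starts at slot 3, hence the "+ 3".

constexpr int kPointerSize = 8;        // x64
constexpr int kReturnValueOffset = 3;  // stand-in for the real constant
constexpr int kReturnValueDisplacement =
    (kReturnValueOffset + 3) * kPointerSize;  // rbp-relative, as in the code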
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index c2fd970..ddf59eb 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -88,26 +88,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers rbp and rsp are set to the correct values though.
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uintptr_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
@@ -125,8 +105,7 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
   // There is no dynamic alignment padding on x64 in the input frame.
   return false;
 }
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 05b199d..a9532dc 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -956,6 +956,12 @@
     int mod, regop, rm, vvvv = vex_vreg();
     get_modrm(*current, &mod, &regop, &rm);
     switch (opcode) {
+      case 0x0a:
+        AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", *current++);
+        break;
       case 0x0b:
         AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
                        NameOfXMMRegister(vvvv));
@@ -1516,6 +1522,12 @@
         current += PrintRightOperand(current);
         AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x0a) {
+        get_modrm(*current, &mod, &regop, &rm);
+        AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(",0x%x", (*current) & 3);
+        current += 1;
       } else if (third_byte == 0x0b) {
         get_modrm(*current, &mod, &regop, &rm);
          // roundsd xmm, xmm/m64, imm8
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index 79315c7..0913d1c 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -54,20 +54,6 @@
 const Register StringCompareDescriptor::RightRegister() { return rax; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return rdx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return rax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return rdi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return rcx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return rdx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return rcx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return rdx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return rbx; }
-
-
 const Register ApiGetterDescriptor::function_address() { return r8; }
 
 
@@ -96,6 +82,30 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rdi, rdx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rdi};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rdi};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rdi};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -121,6 +131,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return rax; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
 
 
@@ -166,13 +180,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rcx, rax};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {rdi};
@@ -406,6 +413,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+      kInterpreterDispatchTableRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -417,7 +432,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -429,7 +443,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9952eb3..e72d40b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -251,38 +251,9 @@
                                 Condition cc,
                                 Label* branch,
                                 Label::Distance distance) {
-  if (serializer_enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
-      andp(scratch, kScratchRegister);
-    } else {
-      Move(scratch, ExternalReference::new_space_mask(isolate()));
-      andp(scratch, object);
-    }
-    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
-    cmpp(scratch, kScratchRegister);
-    j(cc, branch, distance);
-  } else {
-    DCHECK(kPointerSize == kInt64Size
-        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
-        : kPointerSize == kInt32Size);
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
-    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
-         Assembler::RelocInfoNone());
-    if (scratch.is(object)) {
-      addp(scratch, kScratchRegister);
-    } else {
-      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    andp(scratch,
-         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
-    j(cc, branch, distance);
-  }
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cc, branch, distance);
 }
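The rewritten check relies on the page header flags rather than comparing against the new-space address range. A hedged C++ sketch of the idea (page size, bit positions and header layout are assumptions, not the real MemoryChunk definition):

#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed
constexpr uintptr_t kInFromSpaceBit = uintptr_t{1} << 3;            // assumed
constexpr uintptr_t kInToSpaceBit = uintptr_t{1} << 4;              // assumed

struct PageHeader {
  uintptr_t flags;  // assumed to sit at a fixed offset in the page header
};

static bool InNewSpace(uintptr_t object_address) {
  // Mask the address down to its page, then test the space bits.
  auto* page =
      reinterpret_cast<PageHeader*>(object_address & ~kPageAlignmentMask);
  return (page->flags & (kInFromSpaceBit | kInToSpaceBit)) != 0;
}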
 
 
@@ -507,6 +478,90 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // The input registers are fixed to make calling the C write barrier function
+  // easier.
+  DCHECK(js_function.is(rdi));
+  DCHECK(code_entry.is(rcx));
+  DCHECK(scratch.is(rax));
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    Label ok;
+    leap(scratch, FieldOperand(js_function, offset));
+    cmpp(code_entry, Operand(scratch, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+                Label::kNear);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+                Label::kNear);
+
+  // Save input registers.
+  Push(js_function);
+  Push(code_entry);
+
+  const Register dst = scratch;
+  leap(dst, FieldOperand(js_function, offset));
+
+  // Save caller-saved registers.
+  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count);
+
+  // Load the argument registers.
+  if (arg_reg_1.is(rcx)) {
+    // Windows calling convention.
+    DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
+
+    movp(arg_reg_1, js_function);  // rcx gets rdi.
+    movp(arg_reg_2, dst);          // rdx gets rax.
+  } else {
+    // AMD64 calling convention.
+    DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
+
+    // rdi is already loaded with js_function.
+    movp(arg_reg_2, dst);  // rsi gets rax.
+  }
+  Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  // Restore input registers.
+  Pop(code_entry);
+  Pop(js_function);
+
+  bind(&done);
+}
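The register shuffling above implements a three-argument C call; a hedged sketch of the shape being targeted (the name and exact types are assumptions, not the actual V8 declaration):

// (host function, address of the code-entry slot, isolate) -- in that order.
extern "C" void RecordWriteCodeEntry(void* host_js_function,  // arg_reg_1
                                     void** code_entry_slot,  // arg_reg_2
                                     void* isolate);          // arg_reg_3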
 
 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
   if (emit_debug_code()) Check(cc, reason);
@@ -589,9 +644,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // Control will not return here.
   int3();
@@ -690,18 +745,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, rdi);
-  InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 #define REG(Name) \
   { Register::kCode_##Name }
 
@@ -823,6 +866,30 @@
 }
 
 
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vxorps(dst, dst, dst);
+    vcvtlsi2ss(dst, dst, src);
+  } else {
+    xorps(dst, dst);
+    cvtlsi2ss(dst, src);
+  }
+}
+
+
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vxorps(dst, dst, dst);
+    vcvtlsi2ss(dst, dst, src);
+  } else {
+    xorps(dst, dst);
+    cvtlsi2ss(dst, src);
+  }
+}
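The xorps/vxorps before the conversion is there because cvtlsi2ss writes only the low lane and merges the rest from dst, which would otherwise create a false dependency on dst's previous contents. An intrinsics rendering of the same sequence (editorial sketch):

#include <immintrin.h>

static __m128 ConvertInt32ToFloat32(int x) {
  __m128 zero = _mm_setzero_ps();  // xorps dst,dst / vxorps dst,dst,dst
  return _mm_cvtsi32_ss(zero, x);  // cvtlsi2ss dst,src (writes lane 0 only)
}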
+
+
 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -918,6 +985,26 @@
 }
 
 
+void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vcvttss2si(dst, src);
+  } else {
+    cvttss2si(dst, src);
+  }
+}
+
+
+void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vcvttss2si(dst, src);
+  } else {
+    cvttss2si(dst, src);
+  }
+}
+
+
 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -3865,6 +3952,19 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    testb(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAReceiver);
+    Push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+    Pop(object);
+    Check(above_equal, kOperandIsNotAReceiver);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   if (emit_debug_code()) {
     Label done_checking;
@@ -4168,7 +4268,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -5040,7 +5140,7 @@
     incp(source);
     incp(destination);
     decl(length);
-    j(not_zero, &short_loop);
+    j(not_zero, &short_loop, Label::kNear);
   }
 
   bind(&done);
@@ -5051,13 +5151,13 @@
                                                 Register end_address,
                                                 Register filler) {
   Label loop, entry;
-  jmp(&entry);
+  jmp(&entry, Label::kNear);
   bind(&loop);
   movp(Operand(current_address, 0), filler);
   addp(current_address, Immediate(kPointerSize));
   bind(&entry);
   cmpp(current_address, end_address);
-  j(below, &loop);
+  j(below, &loop, Label::kNear);
 }
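The emitted loop is equivalent to this C++ (editorial sketch): fill [current, end) with the filler word, one pointer-sized slot at a time.

#include <cstdint>

static void InitializeFieldsWithFiller(uintptr_t* current_address,
                                       uintptr_t* end_address,
                                       uintptr_t filler) {
  while (current_address < end_address) *current_address++ = filler;
}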
 
 
@@ -5363,7 +5463,7 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   Label next, start;
   Register empty_fixed_array_value = r8;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -5404,10 +5504,11 @@
 
   bind(&no_elements);
   movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  cmpp(rcx, null_value);
+  CompareRoot(rcx, Heap::kNullValueRootIndex);
   j(not_equal, &next);
 }
 
+
 void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
     Register scratch_reg,
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1aa2c74..9c0b796 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -18,6 +18,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_rax};
 const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kReturnRegister2 = {Register::kCode_r8};
 const Register kJSFunctionRegister = {Register::kCode_rdi};
 const Register kContextRegister = {Register::kCode_rsi};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
@@ -217,7 +218,7 @@
                            Register scratch,
                            Label* branch,
                            Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, not_equal, branch, distance);
+    InNewSpace(object, scratch, zero, branch, distance);
   }
 
   // Check if object is in new space.  Jumps if the object is in new space.
@@ -226,7 +227,7 @@
                         Register scratch,
                         Label* branch,
                         Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, equal, branch, distance);
+    InNewSpace(object, scratch, not_zero, branch, distance);
   }
 
   // Check if an object has the black incremental marking color.  Also uses rcx!
@@ -293,6 +294,11 @@
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting);
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   void RecordWriteForMap(
       Register object,
       Register map,
@@ -395,10 +401,6 @@
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
   // ---------------------------------------------------------------------------
   // Smi tagging, untagging and operations on tagged smis.
 
@@ -430,6 +432,12 @@
   void SmiToInteger64(Register dst, Register src);
   void SmiToInteger64(Register dst, const Operand& src);
 
+  // Convert smi to double.
+  void SmiToDouble(XMMRegister dst, Register src) {
+    SmiToInteger32(kScratchRegister, src);
+    Cvtlsi2sd(dst, kScratchRegister);
+  }
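A sketch of what the new helper computes, assuming the default x64 Smi encoding where the 32-bit payload lives in the upper half of the tagged word (editorial, not V8 source):

#include <cstdint>

static double SmiToDoubleSketch(intptr_t tagged_smi) {
  int32_t value = static_cast<int32_t>(tagged_smi >> 32);  // SmiToInteger32
  return static_cast<double>(value);                       // Cvtlsi2sd
}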
+
   // Multiply a positive smi's integer value by a power of two.
   // Provides result as 64-bit integer value.
   void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
@@ -811,6 +819,8 @@
   void Cvtlsi2sd(XMMRegister dst, Register src);
   void Cvtlsi2sd(XMMRegister dst, const Operand& src);
 
+  void Cvtlsi2ss(XMMRegister dst, Register src);
+  void Cvtlsi2ss(XMMRegister dst, const Operand& src);
   void Cvtqsi2ss(XMMRegister dst, Register src);
   void Cvtqsi2ss(XMMRegister dst, const Operand& src);
 
@@ -822,6 +832,8 @@
 
   void Cvtsd2si(Register dst, XMMRegister src);
 
+  void Cvttss2si(Register dst, XMMRegister src);
+  void Cvttss2si(Register dst, const Operand& src);
   void Cvttsd2si(Register dst, XMMRegister src);
   void Cvttsd2si(Register dst, const Operand& src);
   void Cvttss2siq(Register dst, XMMRegister src);
@@ -1204,6 +1216,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object);
@@ -1540,8 +1555,7 @@
 
   // Expects object in rax and returns map with validated enum cache
   // in rax.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value,
-                      Label* call_runtime);
+  void CheckEnumCache(Label* call_runtime);
 
   // AllocationMemento support. Arrays may have an associated
   // AllocationMemento object that can be checked for in order to pretransition
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 0e529c7..7af1d02 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -204,10 +204,8 @@
     Assembler::FlushICache(isolate_, pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+                                                                  cell);
   }
 }
 
@@ -269,16 +267,6 @@
 }
 
 
-bool RelocInfo::IsPatchedReturnSequence() {
-  return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
-  return !Assembler::IsNop(pc());
-}
-
-
 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index 5391948..66fda57 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -674,6 +674,11 @@
   emit_operand(reg, op);
 }
 
+void Assembler::cmp(const Operand& op, Register reg) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x39);
+  emit_operand(reg, op);
+}
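Opcode 0x39 is the "CMP r/m32, r32" form, the mirror of 0x3B ("CMP r32, r/m32") used by the existing cmp(Register, Operand) overload. A hand-encoding sketch for one concrete instance (editorial):

#include <cstdint>
#include <vector>

static std::vector<uint8_t> EncodeCmpMemEbxEax() {
  // ModRM: mod=00 (no displacement), reg=eax (0), rm=[ebx] (3).
  const uint8_t modrm = (0 << 6) | (0 << 3) | 3;
  return {0x39, modrm};  // cmp [ebx], eax
}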
 
 void Assembler::cmp(const Operand& op, const Immediate& imm) {
   EnsureSpace ensure_space(this);
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 668dc7b..15fc29c 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -186,6 +186,9 @@
 
 typedef DoubleRegister X87Register;
 
+// TODO(x87) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -668,6 +671,7 @@
   void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
   void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+  void cmp(const Operand& op, Register reg);
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -935,7 +939,7 @@
 
   // Record a deoptimization reason that can be used by a log or cpu profiler.
   // Use --trace-deopt to enable.
-  void RecordDeoptReason(const int reason, const SourcePosition position);
+  void RecordDeoptReason(const int reason, int raw_position);
 
   // Writes a single byte or word of data in the code stream.  Used for
   // inline tables, e.g., jump-tables.
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 55ec55f..ce07908 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -60,42 +60,45 @@
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
-
-static void CallRuntimePassFunction(
-    MacroAssembler* masm, Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+                                           Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
+  //  -- eax : argument count (preserved for callee)
   //  -- edx : new target (preserved for callee)
   //  -- edi : target function (preserved for callee)
   // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Push the number of arguments to the callee.
+    __ SmiTag(eax);
+    __ push(eax);
+    // Push a copy of the target function and the new target.
+    __ push(edi);
+    __ push(edx);
+    // Function is also the parameter to the runtime call.
+    __ push(edi);
 
-  FrameScope scope(masm, StackFrame::INTERNAL);
-  // Push a copy of the target function and the new target.
-  __ push(edi);
-  __ push(edx);
-  // Function is also the parameter to the runtime call.
-  __ push(edi);
+    __ CallRuntime(function_id, 1);
+    __ mov(ebx, eax);
 
-  __ CallRuntime(function_id, 1);
-  // Restore target function and new target.
-  __ pop(edx);
-  __ pop(edi);
+    // Restore target function and new target.
+    __ pop(edx);
+    __ pop(edi);
+    __ pop(eax);
+    __ SmiUntag(eax);
+  }
+
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
 }
 
-
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(ebx);
 }
 
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
-}
-
-
 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
@@ -108,17 +111,16 @@
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above_equal, &ok, Label::kNear);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
-                                           bool create_implicit_receiver) {
+                                           bool create_implicit_receiver,
+                                           bool check_derived_construct) {
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- edi: constructor function
@@ -137,148 +139,20 @@
     __ push(eax);
 
     if (create_implicit_receiver) {
-      __ push(edi);
-      __ push(edx);
+      // Allocate the new receiver object.
+      __ Push(edi);
+      __ Push(edx);
+      FastNewObjectStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(edi);
 
-      // Try to allocate the object without transitioning into C code. If any of
-      // the preconditions is not met, the code bails out to the runtime call.
-      Label rt_call, allocated;
-      if (FLAG_inline_new) {
-        // Verify that the new target is a JSFunction.
-        __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
-        __ j(not_equal, &rt_call);
-
-        // Load the initial map and verify that it is in fact a map.
-        // edx: new target
-        __ mov(eax,
-               FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
-        // Will both indicate a NULL and a Smi
-        __ JumpIfSmi(eax, &rt_call);
-        // edi: constructor
-        // eax: initial map (if proven valid below)
-        __ CmpObjectType(eax, MAP_TYPE, ebx);
-        __ j(not_equal, &rt_call);
-
-        // Fall back to runtime if the expected base constructor and base
-        // constructor differ.
-        __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
-        __ j(not_equal, &rt_call);
-
-        // Check that the constructor is not constructing a JSFunction (see
-        // comments in Runtime_NewObject in runtime.cc). In which case the
-        // initial map's instance type would be JS_FUNCTION_TYPE.
-        // edi: constructor
-        // eax: initial map
-        __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-        __ j(equal, &rt_call);
-
-        // Now allocate the JSObject on the heap.
-        // edi: constructor
-        // eax: initial map
-        __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-        __ shl(edi, kPointerSizeLog2);
-
-        __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
-        Factory* factory = masm->isolate()->factory();
-
-        // Allocated the JSObject, now initialize the fields.
-        // eax: initial map
-        // ebx: JSObject (not HeapObject tagged - the actual address).
-        // edi: start of next object
-        __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-        __ mov(ecx, factory->empty_fixed_array());
-        __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-        __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-        __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-
-        // Add the object tag to make the JSObject real, so that we can continue
-        // and jump into the continuation code at any time from now on.
-        __ or_(ebx, Immediate(kHeapObjectTag));
-
-        // Fill all the in-object properties with the appropriate filler.
-        // ebx: JSObject (tagged)
-        // ecx: First in-object property of JSObject (not tagged)
-        __ mov(edx, factory->undefined_value());
-
-        if (!is_api_function) {
-          Label no_inobject_slack_tracking;
-
-          // The code below relies on these assumptions.
-          STATIC_ASSERT(Map::kNoSlackTracking == 0);
-          STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-          // Check if slack tracking is enabled.
-          __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
-          __ shr(esi, Map::ConstructionCounter::kShift);
-          __ j(zero, &no_inobject_slack_tracking);  // Map::kNoSlackTracking
-          __ push(esi);  // Save allocation count value.
-          // Decrease generous allocation count.
-          __ sub(FieldOperand(eax, Map::kBitField3Offset),
-                 Immediate(1 << Map::ConstructionCounter::kShift));
-
-          // Allocate object with a slack.
-          __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-          __ neg(esi);
-          __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
-          // esi: offset of first field after pre-allocated fields
-          if (FLAG_debug_code) {
-            __ cmp(ecx, esi);
-            __ Assert(less_equal,
-                      kUnexpectedNumberOfPreAllocatedPropertyFields);
-          }
-          __ InitializeFieldsWithFiller(ecx, esi, edx);
-
-          // To allow truncation fill the remaining fields with one pointer
-          // filler map.
-          __ mov(edx, factory->one_pointer_filler_map());
-          __ InitializeFieldsWithFiller(ecx, edi, edx);
-
-          __ pop(esi);  // Restore allocation count value before decreasing.
-          __ cmp(esi, Map::kSlackTrackingCounterEnd);
-          __ j(not_equal, &allocated);
-
-          // Push the object to the stack, and then the initial map as
-          // an argument to the runtime call.
-          __ push(ebx);
-          __ push(eax);  // initial map
-          __ CallRuntime(Runtime::kFinalizeInstanceSize);
-          __ pop(ebx);
-
-          // Continue with JSObject being successfully allocated
-          // ebx: JSObject (tagged)
-          __ jmp(&allocated);
-
-          __ bind(&no_inobject_slack_tracking);
-        }
-
-        __ InitializeFieldsWithFiller(ecx, edi, edx);
-
-        // Continue with JSObject being successfully allocated
-        // ebx: JSObject (tagged)
-        __ jmp(&allocated);
-      }
-
-      // Allocate the new receiver object using the runtime call.
-      // edx: new target
-      __ bind(&rt_call);
-      int offset = kPointerSize;
-
-      // Must restore esi (context) and edi (constructor) before calling
-      // runtime.
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-      __ mov(edi, Operand(esp, offset));
-      __ push(edi);  // constructor function
-      __ push(edx);  // new target
-      __ CallRuntime(Runtime::kNewObject);
-      __ mov(ebx, eax);  // store result in ebx
-
-      // New object allocated.
-      // ebx: newly allocated object
-      __ bind(&allocated);
-
-      // Restore the parameters.
-      __ pop(edx);  // new.target
-      __ pop(edi);  // Constructor function.
+      // ----------- S t a t e -------------
+      //  -- edi: constructor function
+      //  -- ebx: newly allocated object
+      //  -- edx: new target
+      // -----------------------------------
 
       // Retrieve smi-tagged arguments count from the stack.
       __ mov(eax, Operand(esp, 0));
@@ -359,6 +233,19 @@
     // Leave construct frame.
   }
 
+  // ES6 9.2.2. Step 13+
+  // Check that the result is not a Smi; a Smi here would mean the constructor
+  // result from a derived class is neither undefined nor an Object.
+  if (check_derived_construct) {
+    Label dont_throw;
+    __ JumpIfNotSmi(eax, &dont_throw);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+    }
+    __ bind(&dont_throw);
+  }
+
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
@@ -372,17 +259,23 @@
 
 
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, true);
+  Generate_JSConstructStubHelper(masm, false, true, false);
 }
 
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true, true);
+  Generate_JSConstructStubHelper(masm, true, false, false);
 }
 
 
 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false, false);
+  Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+    MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, false, true);
 }
 
 
@@ -513,10 +406,8 @@
 //   o ebp: the caller's frame pointer
 //   o esp: stack pointer (pointing to return address)
 //
-// The function builds a JS frame.  Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame.  See InterpreterFrameConstants in
+// frames.h for its layout.
 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -528,14 +419,17 @@
   __ push(edi);  // Callee's JS function.
   __ push(edx);  // Callee's new target.
 
-  // Push zero for bytecode array offset.
-  __ push(Immediate(0));
-
   // Get the bytecode array from the function object and load the pointer to the
   // first entry into edi (InterpreterBytecodeRegister).
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+
+  Label load_debug_bytecode_array, bytecode_array_loaded;
+  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+         Immediate(DebugInfo::uninitialized()));
+  __ j(not_equal, &load_debug_bytecode_array);
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+  __ bind(&bytecode_array_loaded);
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -545,6 +439,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Push bytecode array.
+  __ push(kInterpreterBytecodeArrayRegister);
+  // Push zero for bytecode array offset.
+  __ push(Immediate(0));
+
   // Allocate the local and temporary register file on the stack.
   {
     // Load frame size from the BytecodeArray object.
@@ -578,24 +477,9 @@
 
   // TODO(rmcilroy): List of things not currently dealt with here but done in
   // fullcodegen's prologue:
-  //  - Support profiler (specifically profiling_counter).
   //  - Call ProfileEntryHookStub when isolate has a function_entry_hook.
-  //  - Allow simulator stop operations if FLAG_stop_at is set.
   //  - Code aging of the BytecodeArray object.
 
-  // Perform stack guard check.
-  {
-    Label ok;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(masm->isolate());
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &ok);
-    __ push(kInterpreterBytecodeArrayRegister);
-    __ CallRuntime(Runtime::kStackGuard);
-    __ pop(kInterpreterBytecodeArrayRegister);
-    __ bind(&ok);
-  }
-
   // Load accumulator, register file, bytecode offset, dispatch table into
   // registers.
   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -604,10 +488,8 @@
          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
   __ mov(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  // Since the dispatch table root might be set after builtins are generated,
-  // load directly from the roots table.
-  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
-  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+                  masm->isolate())));
 
   // Push dispatch table as a stack located parameter to the bytecode handler.
   DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
@@ -625,8 +507,17 @@
   // and header removal.
   __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(ebx);
-  __ nop();  // Ensure that return address still counts as interpreter entry
-             // trampoline.
+
+  // Even though the first bytecode handler was called, we will never return.
+  __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+  // Load debug copy of the bytecode array.
+  __ bind(&load_debug_bytecode_array);
+  Register debug_info = kInterpreterBytecodeArrayRegister;
+  __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+  __ mov(kInterpreterBytecodeArrayRegister,
+         FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+  __ jmp(&bytecode_array_loaded);
 }
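The dispatch at the end of the trampoline boils down to an indexed indirect call; a conceptual C++ sketch (editorial, not V8 source):

#include <cstdint>

using BytecodeHandler = void (*)();

static void Dispatch(const uint8_t* bytecode_array, intptr_t offset,
                     BytecodeHandler* dispatch_table) {
  const uint8_t bytecode = bytecode_array[offset];  // movzx_b
  dispatch_table[bytecode]();                       // call ebx
}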
 
 
@@ -671,7 +562,8 @@
 
 
 // static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+    MacroAssembler* masm, TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- ebx : the address of the first argument to be pushed. Subsequent
@@ -694,7 +586,9 @@
 
   // Call the target.
   __ Push(edx);  // Re-push return address.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            tail_call_mode),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -739,33 +633,16 @@
 }
 
 
-static void Generate_InterpreterNotifyDeoptimizedHelper(
-    MacroAssembler* masm, Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(kInterpreterAccumulatorRegister);  // Save accumulator register.
-
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
-
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-
-    __ Pop(kInterpreterAccumulatorRegister);  // Restore accumulator register.
-    // Tear down internal frame.
-  }
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
   // Initialize register file register.
   __ mov(kInterpreterRegisterFileRegister, ebp);
   __ add(kInterpreterRegisterFileRegister,
          Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
 
   // Get the bytecode array pointer from the frame.
-  __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
-                      InterpreterFrameConstants::kFunctionFromRegisterPointer));
-  __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
   __ mov(kInterpreterBytecodeArrayRegister,
-         FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+         Operand(kInterpreterRegisterFileRegister,
+                 InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
 
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
@@ -782,12 +659,13 @@
               InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Push dispatch table as a stack located parameter to the bytecode handler -
-  // overwrite the state slot (we don't use these for interpreter deopts).
-  __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
-  __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Push dispatch table as a stack located parameter to the bytecode handler.
+  __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+                  masm->isolate())));
   DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
-  __ mov(Operand(esp, kPointerSize), ebx);
+  __ Pop(esi);
+  __ Push(ebx);
+  __ Push(esi);
 
   // Dispatch to the target bytecode.
   __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -795,8 +673,6 @@
   __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
 
   // Get the context from the frame.
-  // TODO(rmcilroy): Update interpreter frame to expect current context at the
-  // context slot instead of the function context.
   __ mov(kContextRegister,
          Operand(kInterpreterRegisterFileRegister,
                  InterpreterFrameConstants::kContextFromRegisterPointer));
@@ -808,6 +684,32 @@
 }
 
 
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+    MacroAssembler* masm, Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
+    __ CallRuntime(Runtime::kNotifyDeoptimized);
+    // Tear down internal frame.
+  }
+
+  // Drop state (we don't use these for interpreter deopts), pop the
+  // accumulator value into the accumulator register, and push the PC at the
+  // top of the stack (to simulate the initial call to the bytecode handler in
+  // the interpreter entry trampoline).
+  __ Pop(ebx);
+  __ Drop(1);
+  __ Pop(kInterpreterAccumulatorRegister);
+  __ Push(ebx);
+
+  // Enter the bytecode dispatch.
+  Generate_EnterBytecodeDispatch(masm);
+}
+
+
 void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
 }
@@ -822,22 +724,30 @@
   Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
 }
 
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+  // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in interpreter entry
+  // trampoline. The return will never actually be taken, but our stack walker
+  // uses this address to determine whether a frame is interpreted.
+  __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+  Generate_EnterBytecodeDispatch(masm);
+}
+
 
 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
 }
 
 
 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm,
+                                 Runtime::kCompileOptimized_NotConcurrent);
 }
 
 
 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
-  GenerateTailCallToReturnedCode(masm);
+  GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
 }
 
 
@@ -1375,6 +1285,140 @@
 
 
 // static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+  // ----------- S t a t e -------------
+  //  -- eax                 : number of arguments
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+  Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+  Heap::RootListIndex const root_index =
+      (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+                                     : Heap::kMinusInfinityValueRootIndex;
+  const int reg_sel = (kind == MathMaxMinKind::kMin) ? 1 : 0;
+
+  // Load the accumulator with the default return value (either -Infinity or
+  // +Infinity), with the tagged value in edx and the double value in stx_0.
+  __ LoadRoot(edx, root_index);
+  __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+  __ Move(ecx, eax);
+
+  Label done_loop, loop;
+  __ bind(&loop);
+  {
+    // Check if all parameters are done.
+    __ test(ecx, ecx);
+    __ j(zero, &done_loop);
+
+    // Load the next parameter tagged value into ebx.
+    __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into stx_1, converting the
+    // parameter to a number first using the ToNumberStub if necessary.
+    Label convert, convert_smi, convert_number, done_convert;
+    __ bind(&convert);
+    __ JumpIfSmi(ebx, &convert_smi);
+    __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+                  Heap::kHeapNumberMapRootIndex, &convert_number);
+    {
+      // Parameter is not a Number, use the ToNumberStub to convert it.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(eax);
+      __ SmiTag(ecx);
+      __ Push(eax);
+      __ Push(ecx);
+      __ Push(edx);
+      __ mov(eax, ebx);
+      ToNumberStub stub(masm->isolate());
+      __ CallStub(&stub);
+      __ mov(ebx, eax);
+      __ Pop(edx);
+      __ Pop(ecx);
+      __ Pop(eax);
+      {
+        // Restore the double accumulator value (stx_0).
+        Label restore_smi, done_restore;
+        __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+        __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+        __ jmp(&done_restore, Label::kNear);
+        __ bind(&restore_smi);
+        __ SmiUntag(edx);
+        __ push(edx);
+        __ fild_s(Operand(esp, 0));
+        __ pop(edx);
+        __ SmiTag(edx);
+        __ bind(&done_restore);
+      }
+      __ SmiUntag(ecx);
+      __ SmiUntag(eax);
+    }
+    __ jmp(&convert);
+    __ bind(&convert_number);
+    // Load the parameter's double value into stx_1.
+    __ fld_d(FieldOperand(ebx, HeapNumber::kValueOffset));
+    __ fxch();
+    __ jmp(&done_convert, Label::kNear);
+    __ bind(&convert_smi);
+    __ SmiUntag(ebx);
+    __ push(ebx);
+    __ fild_s(Operand(esp, 0));
+    __ pop(ebx);
+    __ fxch();
+    __ SmiTag(ebx);
+    __ bind(&done_convert);
+
+    // Perform the actual comparison with the accumulator value on the left hand
+    // side (stx_0) and the next parameter value on the right hand side (stx_1).
+    Label compare_equal, compare_nan, compare_swap, done_compare;
+
+    // Duplicate the two values for FCmp.
+    __ fld(1);
+    __ fld(1);
+    __ FCmp();
+    __ j(parity_even, &compare_nan, Label::kNear);
+    __ j(cc, &done_compare, Label::kNear);
+    __ j(equal, &compare_equal, Label::kNear);
+
+    // Result is on the right hand side (stx_0).
+    __ bind(&compare_swap);
+    __ fxch();
+    __ mov(edx, ebx);
+    __ jmp(&done_compare, Label::kNear);
+
+    // At least one side is NaN, which means that the result will be NaN too.
+    __ bind(&compare_nan);
+    // Set the result on the right hand side (stx_0) to NaN.
+    __ fstp(0);
+    __ LoadRoot(edx, Heap::kNanValueRootIndex);
+    __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done_compare, Label::kNear);
+
+    // Left and right hand side are equal, check for -0 vs. +0.
+    __ bind(&compare_equal);
+    // Check the sign of the FPU stack value selected by reg_sel.
+    __ fld(reg_sel);
+    __ FXamSign();
+    __ j(not_zero, &compare_swap);
+
+    __ bind(&done_compare);
+    // The correct result is on the right hand side (stx_0), so stx_1 is no
+    // longer needed and can be dropped.
+    __ fxch();
+    __ fstp(0);
+    __ dec(ecx);
+    __ jmp(&loop);
+  }
+
+  __ bind(&done_loop);
+  __ PopReturnAddressTo(ecx);
+  __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+  __ PushReturnAddressFrom(ecx);
+  __ mov(eax, edx);
+  __ Ret();
+}
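A behavioral sketch of the loop above for the kMax case (editorial, not V8 code; kMin is symmetric, with +Infinity as the seed and the opposite comparison): NaN is sticky, and the signbit test plays the role of the FXamSign check for -0 vs. +0.

#include <cmath>
#include <vector>

static double MathMaxSketch(const std::vector<double>& args) {
  double acc = -INFINITY;  // default return value, as loaded from the root
  for (double v : args) {
    if (std::isnan(v) || std::isnan(acc)) {
      acc = NAN;  // at least one side is NaN, so the result is NaN
      continue;
    }
    if (v > acc || (v == acc && std::signbit(acc) && !std::signbit(v))) {
      acc = v;  // strictly greater, or +0 beating -0 on equality
    }
  }
  return acc;
}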
+
+// static
 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax                 : number of arguments
@@ -1472,9 +1516,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);  // the first argument
-    __ Push(edi);  // constructor function
-    __ Push(edx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1606,9 +1649,8 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);  // the first argument
-    __ Push(edi);  // constructor function
-    __ Push(edx);  // new target
-    __ CallRuntime(Runtime::kNewObject);
+    FastNewObjectStub stub(masm->isolate());
+    __ CallStub(&stub);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
   }
   __ Ret();
@@ -1724,9 +1766,7 @@
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ mov(ebx,
-           FieldOperand(eax, JSObject::kHeaderSize +
-                                 Heap::kArgumentsLengthIndex * kPointerSize));
+    __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
     __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
     __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
     __ j(not_equal, &create_runtime);
@@ -1815,10 +1855,138 @@
   }
 }
 
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// |  ...
+// |  g()'s arg M
+// |  ...
+// |  g()'s arg 1
+// |  g()'s receiver arg
+// |  g()'s caller pc
+// ------- g()'s frame: -------
+// |  g()'s caller fp      <- fp
+// |  g()'s context
+// |  function pointer: g
+// |  -------------------------
+// |  ...
+// |  ...
+// |  f()'s arg N
+// |  ...
+// |  f()'s arg 1
+// |  f()'s receiver arg
+// |  f()'s caller pc      <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+                        Register scratch1, Register scratch2,
+                        Register scratch3) {
+  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+  Comment cmnt(masm, "[ PrepareForTailCall");
+
+  // Prepare for tail call only if the debugger is not active.
+  Label done;
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(masm->isolate());
+  __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+  __ cmp(scratch1, Immediate(0));
+  __ j(not_equal, &done, Label::kNear);
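+  // (Eliding frames here would hide them from the debugger's stack walk,
+  // so tail calls are not prepared while the debugger is active.)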
+
+  // Drop possible interpreter handler/stub frame.
+  {
+    Label no_interpreter_frame;
+    __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+    __ j(not_equal, &no_interpreter_frame, Label::kNear);
+    __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&no_interpreter_frame);
+  }
+
+  // Check if next frame is an arguments adaptor frame.
+  Label no_arguments_adaptor, formal_parameter_count_loaded;
+  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+  // Drop arguments adaptor frame and load arguments count.
+  __ mov(ebp, scratch2);
+  __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(scratch1);
+  __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+  __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+  __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(scratch1,
+         FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(
+      scratch1,
+      FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ SmiUntag(scratch1);
+
+  __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the destination address where we will put the return address
+  // after we drop the current frame.
+  Register new_sp_reg = scratch2;
+  __ sub(scratch1, args_reg);
+  __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
+                             StandardFrameConstants::kCallerPCOffset));
+
+  if (FLAG_debug_code) {
+    __ cmp(esp, new_sp_reg);
+    __ Check(below, kStackAccessBelowStackPointer);
+  }
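+  // Hypothetical example: with 3 formal parameters and 1 argument for the
+  // tail call, new_sp = ebp + 2 * kPointerSize + kCallerPCOffset, i.e. the
+  // copied values reuse the slots that held g()'s arguments above.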
+
+  // Copy receiver and return address as well.
+  Register count_reg = scratch1;
+  __ lea(count_reg, Operand(args_reg, 2));
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return-address slot so it is not trashed, and let the following loop
+  // copy it to the right place.
+  Register tmp_reg = scratch3;
+  __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+  __ mov(Operand(esp, 0), tmp_reg);
+
+  // Restore the caller's frame pointer now, as it could be overwritten by
+  // the copying loop.
+  __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  Operand src(esp, count_reg, times_pointer_size, 0);
+  Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+  // Now copy the callee arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+  Label loop, entry;
+  __ jmp(&entry, Label::kNear);
+  __ bind(&loop);
+  __ dec(count_reg);
+  __ mov(tmp_reg, src);
+  __ mov(dst, tmp_reg);
+  __ bind(&entry);
+  __ cmp(count_reg, Immediate(0));
+  __ j(not_equal, &loop, Label::kNear);
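+  // Equivalent to (sketch): for (i = count; i > 0; --i) dst[i-1] = src[i-1];
+  // copying from the highest slot downwards is safe here because the
+  // destination lies above the source.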
+
+  // Leave current frame.
+  __ mov(esp, new_sp_reg);
+
+  __ bind(&done);
+}
+}  // namespace
 
 // static
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
+                                     ConvertReceiverMode mode,
+                                     TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the function to call (checked to be a JSFunction)
@@ -1907,6 +2075,12 @@
   //  -- esi : the function context.
   // -----------------------------------
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+    // Reload shared function info.
+    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  }
+
   __ mov(ebx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SmiUntag(ebx);
@@ -2012,13 +2186,18 @@
 
 
 // static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+                                              TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(edi);
 
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
   // Patch the receiver to [[BoundThis]].
   __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
   __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2036,7 +2215,8 @@
 
 
 // static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+                             TailCallMode tail_call_mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edi : the target to call (can be any Object).
@@ -2046,14 +2226,24 @@
   __ JumpIfSmi(edi, &non_callable);
   __ bind(&non_smi);
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+  __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
        RelocInfo::CODE_TARGET);
   __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
-  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+  __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
        RelocInfo::CODE_TARGET);
+
+  // Check if target has a [[Call]] internal method.
+  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+  __ j(zero, &non_callable);
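+  // (This callable check now runs before the proxy dispatch below, so any
+  // heap object without [[Call]] reaches the non-callable path directly.)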
+
   __ CmpInstanceType(ecx, JS_PROXY_TYPE);
   __ j(not_equal, &non_function);
 
+  // 0. Prepare for tail call if necessary.
+  if (tail_call_mode == TailCallMode::kAllow) {
+    PrepareForTailCall(masm, eax, ebx, ecx, edx);
+  }
+
   // 1. Runtime fallback for Proxy [[Call]].
   __ PopReturnAddressTo(ecx);
   __ Push(edi);
@@ -2068,15 +2258,12 @@
   // 2. Call to something else, which might have a [[Call]] internal method (if
   // not we raise an exception).
   __ bind(&non_function);
-  // Check if target has a [[Call]] internal method.
-  __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
-  __ j(zero, &non_callable, Label::kNear);
   // Overwrite the original receiver with the (original) target.
   __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
   // Let the "call_as_function_delegate" take care of the rest.
   __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
   __ Jump(masm->isolate()->builtins()->CallFunction(
-              ConvertReceiverMode::kNotNullOrUndefined),
+              ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
           RelocInfo::CODE_TARGET);
 
   // 3. Call to something that is not callable.
@@ -2394,14 +2581,12 @@
   // Load the next prototype.
   __ bind(&next_prototype);
   __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
-  // End if the prototype is null or not hidden.
-  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, receiver_check_failed);
-  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ test(FieldOperand(scratch0, Map::kBitField3Offset),
-          Immediate(Map::IsHiddenPrototype::kMask));
+  __ test(FieldOperand(receiver, Map::kBitField3Offset),
+          Immediate(Map::HasHiddenPrototype::kMask));
   __ j(zero, receiver_check_failed);
+
+  __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
   // Iterate.
   __ jmp(&prototype_loop_start, Label::kNear);
 
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 1da5f41..ff6c8d2 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -327,7 +327,6 @@
 void MathPowStub::Generate(MacroAssembler* masm) {
   const Register base = edx;
   const Register scratch = ecx;
-  Counters* counters = isolate()->counters();
   Label call_runtime;
 
   // We will call runtime helper function directly.
@@ -340,7 +339,6 @@
     // as heap number in exponent.
     __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
   } else {
     // Currently it's only called from full-compiler and exponent type is
@@ -431,456 +429,6 @@
 }
 
 
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  // The key is in edx and the parameter count is in eax.
-  DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
-  DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
-  // The displacement is used for skipping the frame pointer on the
-  // stack. It is the offset of the last parameter (if any) relative
-  // to the frame pointer.
-  static const int kDisplacement = 1 * kPointerSize;
-
-  // Check that the key is a smi.
-  Label slow;
-  __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor, Label::kNear);
-
-  // Check index against formal parameters count limit passed in
-  // through register eax. Use unsigned comparison to get negative
-  // check for free.
-  __ cmp(edx, eax);
-  __ j(above_equal, &slow, Label::kNear);
-
-  // Read the argument from the stack and return it.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
-  __ lea(ebx, Operand(ebp, eax, times_2, 0));
-  __ neg(edx);
-  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
-  __ ret(0);
-
-  // Arguments adaptor case: Check index against actual arguments
-  // limit found in the arguments adaptor frame. Use unsigned
-  // comparison to get negative check for free.
-  __ bind(&adaptor);
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, ecx);
-  __ j(above_equal, &slow, Label::kNear);
-
-  // Read the argument from the stack and return it.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
-  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
-  __ neg(edx);
-  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
-  __ ret(0);
-
-  // Slow-case: Handle non-smi or out-of-bounds access to arguments
-  // by calling the runtime system.
-  __ bind(&slow);
-  __ pop(ebx);  // Return address.
-  __ push(edx);
-  __ push(ebx);
-  __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &runtime, Label::kNear);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame, Label::kNear);
-
-  // No adaptor, parameter count = argument count.
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ jmp(&try_allocate, Label::kNear);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ mov(ebx, ecx);
-  __ push(ecx);
-  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx, Operand(edx, ecx, times_2,
-                      StandardFrameConstants::kCallerSPOffset));
-
-  // ebx = parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, ecx);
-  __ j(less_equal, &try_allocate, Label::kNear);
-  __ mov(ebx, ecx);
-
-  // Save mapped parameter count and function.
-  __ bind(&try_allocate);
-  __ push(edi);
-  __ push(ebx);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  Label no_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &no_parameter_map, Label::kNear);
-  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
-  __ bind(&no_parameter_map);
-
-  // 2. Backing store.
-  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
-
-  // eax = address of new object(s) (tagged)
-  // ecx = argument count (smi-tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Get the arguments map from the current native context into edi.
-  Label has_mapped_parameters, instantiate;
-  __ mov(edi, NativeContextOperand());
-  __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, ebx);
-  __ j(not_zero, &has_mapped_parameters, Label::kNear);
-  __ mov(
-      edi,
-      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
-  __ jmp(&instantiate, Label::kNear);
-
-  __ bind(&has_mapped_parameters);
-  __ mov(edi, Operand(edi, Context::SlotOffset(
-                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
-  __ bind(&instantiate);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // edi = address of arguments map (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Copy the JS object part.
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
-  __ mov(edi, Operand(esp, 1 * kPointerSize));
-  __ AssertNotSmi(edi);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                               Heap::kArgumentsCalleeIndex * kPointerSize),
-         edi);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(ecx);
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                      Heap::kArgumentsLengthIndex * kPointerSize),
-         ecx);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, edi will point there, otherwise to the
-  // backing store.
-  __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (tagged)
-  // edx = address of receiver argument
-  // edi = address of parameter map or backing store (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Free two registers.
-  __ push(edx);
-  __ push(eax);
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &skip_parameter_map);
-
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
-  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
-  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameter thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ push(ecx);
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ add(ebx, Operand(esp, 5 * kPointerSize));
-  __ sub(ebx, eax);
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  __ mov(edx, edi);
-  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
-  // eax = loop variable (tagged)
-  // ebx = mapping index (tagged)
-  // ecx = the hole value
-  // edx = address of parameter map (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = argument count (tagged)
-  // esp[4] = address of new object (tagged)
-  // esp[8] = address of receiver argument
-  // esp[12] = mapped parameter count (tagged)
-  // esp[16] = function
-  // esp[20] = parameter count (tagged)
-  __ jmp(&parameters_test, Label::kNear);
-
-  __ bind(&parameters_loop);
-  __ sub(eax, Immediate(Smi::FromInt(1)));
-  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
-  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ test(eax, eax);
-  __ j(not_zero, &parameters_loop, Label::kNear);
-  __ pop(ecx);
-
-  __ bind(&skip_parameter_map);
-
-  // ecx = argument count (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = address of new object (tagged)
-  // esp[4] = address of receiver argument
-  // esp[8] = mapped parameter count (tagged)
-  // esp[12] = function
-  // esp[16] = parameter count (tagged)
-  // Copy arguments header and remaining slots (if there are any).
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
-  Label arguments_loop, arguments_test;
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
-  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(edx, ebx);
-  __ jmp(&arguments_test, Label::kNear);
-
-  __ bind(&arguments_loop);
-  __ sub(edx, Immediate(kPointerSize));
-  __ mov(eax, Operand(edx, 0));
-  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(ebx, ecx);
-  __ j(less, &arguments_loop, Label::kNear);
-
-  // Restore.
-  __ pop(eax);  // Address of arguments object.
-  __ Drop(4);
-
-  // Return.
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ pop(eax);   // Remove saved mapped parameter count.
-  __ pop(edi);   // Pop saved function.
-  __ pop(eax);   // Remove saved parameter count.
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // esp[0] : return address
-
-  DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
-  DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
-  DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label try_allocate, runtime;
-  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &try_allocate, Label::kNear);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  // Try the new space allocation. Start out with computing the size of
-  // the arguments object and the elements array.
-  Label add_arguments_object;
-  __ bind(&try_allocate);
-  __ mov(eax, ecx);
-  __ test(eax, eax);
-  __ j(zero, &add_arguments_object, Label::kNear);
-  __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
-  __ bind(&add_arguments_object);
-  __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
-
-  // Do the allocation of both objects in one go.
-  __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
-
-  // Get the arguments map from the current native context.
-  __ mov(edi, NativeContextOperand());
-  __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-
-  // Get the length (smi tagged) and set that as an in-object property too.
-  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
-  __ AssertSmi(ecx);
-  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
-                      Heap::kArgumentsLengthIndex * kPointerSize),
-         ecx);
-
-  // If there are no actual arguments, we're done.
-  Label done;
-  __ test(ecx, ecx);
-  __ j(zero, &done, Label::kNear);
-
-  // Set up the elements pointer in the allocated arguments object and
-  // initialize the header in the elements fixed array.
-  __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
-  // Untag the length for the loop below.
-  __ SmiUntag(ecx);
-
-  // Copy the fixed array slots.
-  Label loop;
-  __ bind(&loop);
-  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(edi, Immediate(kPointerSize));
-  __ sub(edx, Immediate(kPointerSize));
-  __ dec(ecx);
-  __ j(not_zero, &loop);
-
-  // Return.
-  __ bind(&done);
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
-  // ecx : number of parameters (tagged)
-  // edx : parameters pointer
-  // ebx : rest parameter index (tagged)
-  // esp[0] : return address
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label runtime;
-  __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &runtime);
-
-  // Patch the arguments.length and the parameters pointer.
-  __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx,
-         Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
-  __ bind(&runtime);
-  __ pop(eax);   // Save return address.
-  __ push(ecx);  // Push number of parameters.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ebx);  // Push rest parameter index.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -952,39 +500,37 @@
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ JumpIfSmi(eax, &runtime);
   __ mov(edx, eax);  // Make a copy of the original subject string.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
 
   // eax: subject string
   // edx: subject string
-  // ebx: subject string instance type
   // ecx: RegExp data (FixedArray)
   // Handle subject string according to its encoding and representation:
   // (1) Sequential two byte?  If yes, go to (9).
-  // (2) Sequential one byte?  If yes, go to (6).
-  // (3) Anything but sequential or cons?  If yes, go to (7).
-  // (4) Cons string.  If the string is flat, replace subject with first string.
-  //     Otherwise bailout.
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (2) Sequential one byte?  If yes, go to (5).
+  // (3) Sequential or cons?  If not, go to (6).
+  // (4) Cons string.  If the string is flat, replace subject with first string
+  //     and go to (1). Otherwise bail out to runtime.
+  // (5) One byte sequential.  Load regexp code for one byte.
   // (E) Carry on.
   /// [...]
 
   // Deferred code at the end of the stub:
-  // (7) Not a long external string?  If yes, go to (10).
-  // (8) External string.  Make it, offset-wise, look like a sequential string.
-  // (8a) Is the external string one byte?  If yes, go to (6).
-  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
+  // (6) Long external string?  If not, go to (10).
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  // (8) Is the external string one byte?  If yes, go to (5).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (5a).
+  // (11) Sliced string.  Replace subject with parent. Go to (1).
 
-  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
-        external_string /* 8 */, check_underlying /* 5a */,
-        not_seq_nor_cons /* 7 */, check_code /* E */,
-        not_long_external /* 10 */;
+  Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
+      external_string /* 7 */, check_underlying /* 1 */,
+      not_seq_nor_cons /* 6 */, check_code /* E */, not_long_external /* 10 */;
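+  // With this change, cons and sliced strings jump back to (1) once they
+  // are unwrapped, replacing the old (5a)/(5b) re-dispatch, so the string
+  // classification logic lives in exactly one place.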
 
+  __ bind(&check_underlying);
   // (1) Sequential two byte?  If yes, go to (9).
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
   __ and_(ebx, kIsNotStringMask |
                kStringRepresentationMask |
                kStringEncodingMask |
@@ -992,14 +538,14 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);  // Go to (9).
 
-  // (2) Sequential one byte?  If yes, go to (6).
+  // (2) Sequential one byte?  If yes, go to (5).
   // Any other sequential string must be one byte.
   __ and_(ebx, Immediate(kIsNotStringMask |
                          kStringRepresentationMask |
                          kShortExternalStringMask));
-  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).
+  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (5).
 
-  // (3) Anything but sequential or cons?  If yes, go to (7).
+  // (3) Sequential or cons?  If not, go to (6).
   // We check whether the subject string is a cons, since sequential strings
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
@@ -1007,32 +553,19 @@
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(ebx, Immediate(kExternalStringTag));
-  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).
+  __ j(greater_equal, &not_seq_nor_cons);  // Go to (6).
 
   // (4) Cons string.  Check that it's flat.
   // Replace subject with first string and reload instance type.
   __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
   __ j(not_equal, &runtime);
   __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
-  __ bind(&check_underlying);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
-  // (5a) Is subject sequential two byte?  If yes, go to (9).
-  __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
-  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);  // Go to (9).
-  // (5b) Is subject external?  If yes, go to (8).
-  __ test_b(ebx, kStringRepresentationMask);
-  // The underlying external string is never a short external string.
-  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
-  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
-  __ j(not_zero, &external_string);  // Go to (8).
+  __ jmp(&check_underlying);
 
   // eax: sequential subject string (or look-alike, external string)
   // edx: original subject string
   // ecx: RegExp data (FixedArray)
-  // (6) One byte sequential.  Load regexp code for one byte.
+  // (5) One byte sequential.  Load regexp code for one byte.
   __ bind(&seq_one_byte_string);
   // Load previous index and check range before edx is overwritten.  We have
   // to use edx instead of eax here because it might have been only made to
@@ -1253,12 +786,12 @@
   __ TailCallRuntime(Runtime::kRegExpExec);
 
   // Deferred code for string handling.
-  // (7) Not a long external string?  If yes, go to (10).
+  // (6) Long external string?  If not, go to (10).
   __ bind(&not_seq_nor_cons);
   // Compare flags are still set from (3).
   __ j(greater, &not_long_external, Label::kNear);  // Go to (10).
 
-  // (8) External string.  Short external strings have been ruled out.
+  // (7) External string.  Short external strings have been ruled out.
   __ bind(&external_string);
   // Reload instance type.
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1274,14 +807,14 @@
   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // (8a) Is the external string one byte?  If yes, go to (6).
+  // (8) Is the external string one byte?  If yes, go to (5).
   __ test_b(ebx, kStringEncodingMask);
-  __ j(not_zero, &seq_one_byte_string);  // Goto (6).
+  __ j(not_zero, &seq_one_byte_string);  // Go to (5).
 
   // eax: sequential subject string (or look-alike, external string)
   // edx: original subject string
   // ecx: RegExp data (FixedArray)
-  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   __ bind(&seq_two_byte_string);
   // Load previous index and check range before edx is overwritten.  We have
   // to use edx instead of eax here because it might have been only made to
@@ -1301,11 +834,11 @@
   __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent.  Go to (5a).
+  // (11) Sliced string.  Replace subject with parent.  Go to (1).
   // Load offset into edi and replace subject string with parent.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
   __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
-  __ jmp(&check_underlying);  // Go to (5a).
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -1384,16 +917,11 @@
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
       __ cmp(edx, isolate()->factory()->undefined_value());
-      if (is_strong(strength())) {
-        // In strong mode, this comparison must throw, so call the runtime.
-        __ j(equal, &runtime_call, Label::kFar);
-      } else {
-        Label check_for_nan;
-        __ j(not_equal, &check_for_nan, Label::kNear);
-        __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
-        __ ret(0);
-        __ bind(&check_for_nan);
-      }
+      Label check_for_nan;
+      __ j(not_equal, &check_for_nan, Label::kNear);
+      __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+      __ ret(0);
+      __ bind(&check_for_nan);
     }
 
     // Test for NaN. Compare heap numbers in a general way,
@@ -1413,12 +941,6 @@
       // Call runtime on identical SIMD values since we must throw a TypeError.
       __ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
-      if (is_strong(strength())) {
-        // We have already tested for smis and heap numbers, so if both
-        // arguments are not strings we must proceed to the slow case.
-        __ test(ecx, Immediate(kIsNotStringMask));
-        __ j(not_zero, &runtime_call, Label::kFar);
-      }
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);
@@ -1575,7 +1097,7 @@
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
-    Label return_unequal;
+    Label return_unequal, undetectable;
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
@@ -1584,48 +1106,58 @@
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
     __ j(not_zero, &runtime_call, Label::kNear);
-    __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, &undetectable, Label::kNear);
+    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, &return_unequal, Label::kNear);
+
+    __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
     __ j(below, &runtime_call, Label::kNear);
-    __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
+    __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
     __ j(below, &runtime_call, Label::kNear);
-    // We do not bail out after this point.  Both are JSObjects, and
-    // they are equal if and only if both are undetectable.
-    // The and of the undetectable flags is 1 if and only if they are equal.
+
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in eax.
+    __ ret(0);  // eax, edx were pushed
+
+    __ bind(&undetectable);
     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
     __ j(zero, &return_unequal, Label::kNear);
-    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    __ j(zero, &return_unequal, Label::kNear);
-    // The objects are both undetectable, so they both compare as the value
-    // undefined, and are equal.
     __ Move(eax, Immediate(EQUAL));
-    __ bind(&return_unequal);
-    // Return non-equal by returning the non-zero object pointer in eax,
-    // or return equal if we fell through to here.
-    __ ret(0);  // rax, rdx were pushed
+    __ ret(0);  // eax, edx were pushed
   }
   __ bind(&runtime_call);
 
-  // Push arguments below the return address.
-  __ pop(ecx);
-  __ push(edx);
-  __ push(eax);
-
-  // Figure out which native to call and setup the arguments.
   if (cc == equal) {
-    __ push(ecx);
-    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(edx);
+      __ Push(eax);
+      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+    }
+    // Turn true into 0 and false into some non-zero value.
+    STATIC_ASSERT(EQUAL == 0);
+    __ sub(eax, Immediate(isolate()->factory()->true_value()));
+    __ Ret();
   } else {
+    // Push arguments below the return address.
+    __ pop(ecx);
+    __ push(edx);
+    __ push(eax);
     __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
 
     // Restore return address on the stack.
     __ push(ecx);
-
     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
     // tagged as a small integer.
-    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
-                                             : Runtime::kCompare);
+    __ TailCallRuntime(Runtime::kCompare);
   }
 
   __ bind(&miss);
@@ -1854,7 +1386,8 @@
 
   __ bind(&call_function);
   __ Set(eax, argc);
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+                                                    tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&extra_checks_or_miss);
@@ -1893,7 +1426,7 @@
 
   __ bind(&call);
   __ Set(eax, argc);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
           RelocInfo::CODE_TARGET);
 
   __ bind(&uninitialized);
@@ -2015,16 +1548,22 @@
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
+  // Reserve space on the stack for the three arguments passed to the call.
+  // If the result size is greater than what can be returned in registers,
+  // also reserve space for the hidden argument pointing at the result
+  // location, and for the result itself.
+  int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
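+  //
+  // Resulting stack layout when result_size() == 3, in pointer-size slots
+  // (a sketch):
+  //   esp[0]    : pointer to the result area (hidden argument)
+  //   esp[1..3] : argc, argv, isolate
+  //   esp[4..6] : three-word result area filled by the callee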
+
   // Enter the exit frame that transitions from JavaScript to C++.
   if (argv_in_register()) {
     DCHECK(!save_doubles());
-    __ EnterApiExitFrame(3);
+    __ EnterApiExitFrame(arg_stack_space);
 
     // Move argc and argv into the correct registers.
     __ mov(esi, ecx);
     __ mov(edi, eax);
   } else {
-    __ EnterExitFrame(save_doubles());
+    __ EnterExitFrame(arg_stack_space, save_doubles());
   }
 
   // ebx: pointer to C function  (C callee-saved)
@@ -2039,14 +1578,36 @@
   if (FLAG_debug_code) {
     __ CheckStackAlignment();
   }
-
   // Call C function.
-  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
-  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
-  __ mov(Operand(esp, 2 * kPointerSize),
-         Immediate(ExternalReference::isolate_address(isolate())));
+  if (result_size() <= 2) {
+    __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
+    __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(isolate())));
+  } else {
+    DCHECK_EQ(3, result_size());
+    // Pass a pointer to the result location as the first argument.
+    __ lea(eax, Operand(esp, 4 * kPointerSize));
+    __ mov(Operand(esp, 0 * kPointerSize), eax);
+    __ mov(Operand(esp, 1 * kPointerSize), edi);  // argc.
+    __ mov(Operand(esp, 2 * kPointerSize), esi);  // argv.
+    __ mov(Operand(esp, 3 * kPointerSize),
+           Immediate(ExternalReference::isolate_address(isolate())));
+  }
   __ call(ebx);
-  // Result is in eax or edx:eax - do not destroy these registers!
+
+  if (result_size() > 2) {
+    DCHECK_EQ(3, result_size());
+#ifndef _WIN32
+    // Restore the "hidden" argument slot on the stack, which was popped by
+    // the callee (on System V ia32 the callee pops the struct-return
+    // pointer when it returns).
+    __ sub(esp, Immediate(kPointerSize));
+#endif
+    // Read the result values stored on the stack; the result sits above the
+    // arguments.
+    __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
+    __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
+    __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
+  }
+  // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
 
   // Check result for exception sentinel.
   Label exception_returned;
@@ -2117,6 +1678,16 @@
   // Compute the handler entry address and jump to it.
   __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
   __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
+  // Check whether this is turbofanned exception handler code before jumping
+  // to it.
+  Label not_turbo;
+  __ push(eax);
+  __ mov(eax, Operand(edi, Code::kKindSpecificFlags1Offset - kHeapObjectTag));
+  __ and_(eax, Immediate(1 << Code::kIsTurbofannedBit));
+  __ j(zero, &not_turbo);
+  __ fninit();
+  __ fld1();
+  __ bind(&not_turbo);
+  __ pop(eax);
   __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
   __ jmp(edi);
 }
@@ -2840,6 +2411,42 @@
 }
 
 
+void ToNameStub::Generate(MacroAssembler* masm) {
+  // The ToName stub takes one argument in eax.
+  Label is_number;
+  __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+  Label not_name;
+  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+  __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
+  // eax: receiver
+  // edi: receiver map
+  __ j(above, &not_name, Label::kNear);
+  __ Ret();
+  __ bind(&not_name);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ bind(&is_number);
+  NumberToStringStub stub(isolate());
+  __ TailCallStub(&stub);
+  __ bind(&not_heap_number);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kToName);
+}
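+// Dispatch summary (sketch): Smis and HeapNumbers tail-call NumberToString,
+// Names are returned unchanged, Oddballs return their cached to_string, and
+// anything else falls back to Runtime::kToName.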
+
+
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -3043,19 +2650,15 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
   __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
-  if (op() != Token::EQ_STRICT && is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
-  } else {
-    if (!Token::IsEqualityOp(op())) {
-      __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
-      __ AssertSmi(eax);
-      __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
-      __ AssertSmi(edx);
-      __ xchg(eax, edx);
-    }
-    __ sub(eax, edx);
-    __ Ret();
+  if (!Token::IsEqualityOp(op())) {
+    __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+    __ AssertSmi(eax);
+    __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+    __ AssertSmi(edx);
+    __ xchg(eax, edx);
   }
+  __ sub(eax, edx);
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -3117,7 +2720,7 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
@@ -3360,8 +2963,6 @@
   if (Token::IsEqualityOp(op())) {
     __ sub(eax, edx);
     __ ret(0);
-  } else if (is_strong(strength())) {
-    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
   } else {
     __ PopReturnAddressTo(ecx);
     __ Push(edx);
@@ -3665,11 +3266,8 @@
                            regs_.scratch0(),
                            &dont_need_remembered_set);
 
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     not_zero,
-                     &dont_need_remembered_set);
+    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+                        &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
     // remembered set.
@@ -4370,7 +3968,6 @@
   __ jmp(&compare_map);
 }
 
-
 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // edx
   Register key = VectorStoreICDescriptor::NameRegister();           // ecx
@@ -4426,14 +4023,12 @@
   KeyedStoreIC::GenerateMiss(masm);
 }
 
-
 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(ebx);
   CallICStub stub(isolate(), state());
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
 
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4441,7 +4036,6 @@
   }
 }
 
-
 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   // Save volatile registers.
   const int kNumSavedRegisters = 3;
@@ -4473,18 +4067,15 @@
   __ ret(0);
 }
 
-
-template<class T>
+template <class T>
 static void CreateArrayDispatch(MacroAssembler* masm,
                                 AllocationSiteOverrideMode mode) {
   if (mode == DISABLE_ALLOCATION_SITES) {
-    T stub(masm->isolate(),
-           GetInitialFastElementsKind(),
-           mode);
+    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
     __ TailCallStub(&stub);
   } else if (mode == DONT_OVERRIDE) {
-    int last_index = GetSequenceIndexFromFastElementsKind(
-        TERMINAL_FAST_ELEMENTS_KIND);
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
     for (int i = 0; i <= last_index; ++i) {
       Label next;
       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -4502,7 +4093,6 @@
   }
 }
 
-
 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                            AllocationSiteOverrideMode mode) {
   // ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
@@ -4534,14 +4124,12 @@
     ElementsKind initial = GetInitialFastElementsKind();
     ElementsKind holey_initial = GetHoleyElementsKind(initial);
 
-    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
-                                                  holey_initial,
-                                                  DISABLE_ALLOCATION_SITES);
+    ArraySingleArgumentConstructorStub stub_holey(
+        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
     __ TailCallStub(&stub_holey);
 
     __ bind(&normal_sequence);
-    ArraySingleArgumentConstructorStub stub(masm->isolate(),
-                                            initial,
+    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
                                             DISABLE_ALLOCATION_SITES);
     __ TailCallStub(&stub);
   } else if (mode == DONT_OVERRIDE) {
@@ -4564,8 +4152,8 @@
            Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
 
     __ bind(&normal_sequence);
-    int last_index = GetSequenceIndexFromFastElementsKind(
-        TERMINAL_FAST_ELEMENTS_KIND);
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
     for (int i = 0; i <= last_index; ++i) {
       Label next;
       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -4583,11 +4171,10 @@
   }
 }
 
-
-template<class T>
+template <class T>
 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
-  int to_index = GetSequenceIndexFromFastElementsKind(
-      TERMINAL_FAST_ELEMENTS_KIND);
+  int to_index =
+      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
   for (int i = 0; i <= to_index; ++i) {
     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
     T stub(isolate, kind);
@@ -4599,7 +4186,6 @@
   }
 }
 
-
 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
@@ -4609,10 +4195,9 @@
       isolate);
 }
 
-
 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
     Isolate* isolate) {
-  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -4624,10 +4209,8 @@
   }
 }
 
-
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
-    MacroAssembler* masm,
-    AllocationSiteOverrideMode mode) {
+    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
   if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ test(eax, eax);
@@ -4652,7 +4235,6 @@
   }
 }
 
-
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
@@ -4726,9 +4308,8 @@
   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
 }
 
-
-void InternalArrayConstructorStub::GenerateCase(
-    MacroAssembler* masm, ElementsKind kind) {
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+                                                ElementsKind kind) {
   Label not_zero_case, not_one_case;
   Label normal_sequence;
 
@@ -4748,8 +4329,8 @@
     __ test(ecx, ecx);
     __ j(zero, &normal_sequence);
 
-    InternalArraySingleArgumentConstructorStub
-        stub1_holey(isolate(), GetHoleyElementsKind(kind));
+    InternalArraySingleArgumentConstructorStub stub1_holey(
+        isolate(), GetHoleyElementsKind(kind));
     __ TailCallStub(&stub1_holey);
   }
 
@@ -4762,7 +4343,6 @@
   __ TailCallStub(&stubN);
 }
 
-
 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
@@ -4798,8 +4378,7 @@
     __ cmp(ecx, Immediate(FAST_ELEMENTS));
     __ j(equal, &done);
     __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
-    __ Assert(equal,
-              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+    __ Assert(equal, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
     __ bind(&done);
   }
 
@@ -4812,6 +4391,639 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : target
+  //  -- edx    : new target
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+  __ AssertReceiver(edx);
+
+  // Verify that the new target is a JSFunction.
+  Label new_object;
+  __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &new_object);
+
+  // Load the initial map and verify that it's in fact a map.
+  __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+  __ JumpIfSmi(ecx, &new_object);
+  __ CmpObjectType(ecx, MAP_TYPE, ebx);
+  __ j(not_equal, &new_object);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
+  __ j(not_equal, &new_object);
+
+  // Allocate the JSObject on the heap.
+  Label allocate, done_allocate;
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+  __ lea(ebx, Operand(ebx, times_pointer_size, 0));
+  __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+  __ bind(&done_allocate);
+
+  // Initialize the JSObject fields.
+  __ mov(Operand(eax, JSObject::kMapOffset), ecx);
+  __ mov(Operand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(Operand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+
+  // ----------- S t a t e -------------
+  //  -- eax    : result (untagged)
+  //  -- ebx    : result fields (untagged)
+  //  -- edi    : result end (untagged)
+  //  -- ecx    : initial map
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  // Perform in-object slack tracking if requested.
+  Label slack_tracking;
+  STATIC_ASSERT(Map::kNoSlackTracking == 0);
+  __ test(FieldOperand(ecx, Map::kBitField3Offset),
+          Immediate(Map::ConstructionCounter::kMask));
+  __ j(not_zero, &slack_tracking, Label::kNear);
+  {
+    // Initialize all in-object fields with undefined.
+    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edi, edx);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ inc(eax);
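+    // (inc sets the low tag bit: tagged pointer = untagged address + 1.)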
+    __ Ret();
+  }
+  __ bind(&slack_tracking);
+  {
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    __ sub(FieldOperand(ecx, Map::kBitField3Offset),
+           Immediate(1 << Map::ConstructionCounter::kShift));
+
+    // Initialize the in-object fields with undefined.
+    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+    __ neg(edx);
+    __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+    // Initialize the remaining (reserved) fields with one pointer filler map.
+    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+    __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
+    __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+    // Add the object tag to make the JSObject real.
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    __ inc(eax);
+
+    // Check if we can finalize the instance size.
+    Label finalize;
+    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+    __ test(FieldOperand(ecx, Map::kBitField3Offset),
+            Immediate(Map::ConstructionCounter::kMask));
+    __ j(zero, &finalize, Label::kNear);
+    __ Ret();
+
+    // Finalize the instance size.
+    __ bind(&finalize);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(eax);
+      __ Push(ecx);
+      __ CallRuntime(Runtime::kFinalizeInstanceSize);
+      __ Pop(eax);
+    }
+    __ Ret();
+  }
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(ebx);
+    __ Push(ecx);
+    __ Push(ebx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ Pop(ecx);
+  }
+  STATIC_ASSERT(kHeapObjectTag == 1);
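+  // The runtime call returns a tagged object; untag it and recompute the
+  // result end in edi, as expected at done_allocate.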
+  __ dec(eax);
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+  __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+  __ jmp(&done_allocate);
+
+  // Fall back to %NewObject.
+  __ bind(&new_object);
+  __ PopReturnAddressTo(ecx);
+  __ Push(edi);
+  __ Push(edx);
+  __ PushReturnAddressFrom(ecx);
+  __ TailCallRuntime(Runtime::kNewObject);
+}
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make edx point to that
+  // JavaScript frame.
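+  // (In a JavaScript frame the marker slot holds the function itself.)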
+  {
+    Label loop, loop_entry;
+    __ mov(edx, ebp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have rest parameters (only possible if we have an
+  // arguments adaptor frame below the function frame).
+  Label no_rest_parameters;
+  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+  // Check if the arguments adaptor frame contains more arguments than
+  // specified by the function's internal formal parameter count.
+  Label rest_parameters;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sub(eax,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ j(greater, &rest_parameters);
+
+  // Return an empty rest parameter array.
+  __ bind(&no_rest_parameters);
+  {
+    // ----------- S t a t e -------------
+    //  -- esi    : context
+    //  -- esp[0] : return address
+    // -----------------------------------
+
+    // Allocate an empty rest parameter array.
+    Label allocate, done_allocate;
+    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the rest parameter array in eax.
+    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+    __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
+    __ mov(ecx, isolate()->factory()->empty_fixed_array());
+    __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
+    __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
+    __ mov(FieldOperand(eax, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(0)));
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(Smi::FromInt(JSArray::kSize));
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+    }
+    __ jmp(&done_allocate);
+  }
+
+  __ bind(&rest_parameters);
+  {
+    // Compute the pointer to the first rest parameter (skipping the receiver).
+    __ lea(ebx,
+           Operand(ebx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
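+    // (eax is a smi, so scaling by times_half_pointer_size yields the
+    // untagged count times kPointerSize.)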
+
+    // ----------- S t a t e -------------
+    //  -- esi    : context
+    //  -- eax    : number of rest parameters (tagged)
+    //  -- ebx    : pointer to the first rest parameter
+    //  -- esp[0] : return address
+    // -----------------------------------
+
+    // Allocate space for the rest parameter array plus the backing store.
+    Label allocate, done_allocate;
+    __ lea(ecx, Operand(eax, times_half_pointer_size,
+                        JSArray::kSize + FixedArray::kHeaderSize));
+    __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+    __ bind(&done_allocate);
+
+    // Setup the elements array in edx.
+    __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+           isolate()->factory()->fixed_array_map());
+    __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
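+    // Copy the rest parameters from the stack into the backing store; the
+    // smi index in ecx is scaled by times_half_pointer_size to a byte offset.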
+    {
+      Label loop, done_loop;
+      __ Move(ecx, Smi::FromInt(0));
+      __ bind(&loop);
+      __ cmp(ecx, eax);
+      __ j(equal, &done_loop, Label::kNear);
+      __ mov(edi, Operand(ebx, 0 * kPointerSize));
+      __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+                          FixedArray::kHeaderSize),
+             edi);
+      __ sub(ebx, Immediate(1 * kPointerSize));
+      __ add(ecx, Immediate(Smi::FromInt(1)));
+      __ jmp(&loop);
+      __ bind(&done_loop);
+    }
+
+    // Setup the rest parameter array in edi.
+    __ lea(edi,
+           Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+    __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
+    __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
+           isolate()->factory()->empty_fixed_array());
+    __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
+    __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
+    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+    __ mov(eax, edi);
+    __ Ret();
+
+    // Fall back to %AllocateInNewSpace.
+    __ bind(&allocate);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(ecx);
+      __ Push(eax);
+      __ Push(ebx);
+      __ Push(ecx);
+      __ CallRuntime(Runtime::kAllocateInNewSpace);
+      __ mov(edx, eax);
+      __ Pop(ebx);
+      __ Pop(eax);
+    }
+    __ jmp(&done_allocate);
+  }
+}
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx,
+         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+                      StandardFrameConstants::kCallerSPOffset));
+
+  // ecx : number of parameters (tagged)
+  // edx : parameters pointer
+  // edi : function
+  // esp[0] : return address
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adaptor_frame, Label::kNear);
+
+  // No adaptor, parameter count = argument count.
+  __ mov(ebx, ecx);
+  __ push(ecx);
+  __ jmp(&try_allocate, Label::kNear);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ mov(ebx, ecx);
+  __ push(ecx);
+  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ lea(edx,
+         Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+  // ebx = parameter count (tagged)
+  // ecx = argument count (smi-tagged)
+  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+  __ cmp(ebx, ecx);
+  __ j(less_equal, &try_allocate, Label::kNear);
+  __ mov(ebx, ecx);
+
+  // Save mapped parameter count and function.
+  __ bind(&try_allocate);
+  __ push(edi);
+  __ push(ebx);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  Label no_parameter_map;
+  __ test(ebx, ebx);
+  __ j(zero, &no_parameter_map, Label::kNear);
+  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+  __ bind(&no_parameter_map);
+
+  // 2. Backing store.
+  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+
+  // eax = address of new object(s) (tagged)
+  // ecx = argument count (smi-tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Get the arguments map from the current native context into edi.
+  Label has_mapped_parameters, instantiate;
+  __ mov(edi, NativeContextOperand());
+  __ mov(ebx, Operand(esp, 0 * kPointerSize));
+  __ test(ebx, ebx);
+  __ j(not_zero, &has_mapped_parameters, Label::kNear);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+  __ jmp(&instantiate, Label::kNear);
+
+  __ bind(&has_mapped_parameters);
+  __ mov(edi, Operand(edi, Context::SlotOffset(
+                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
+  __ bind(&instantiate);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (smi-tagged)
+  // edi = address of arguments map (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Copy the JS object part.
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+
+  // Set up the callee in-object property.
+  STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
+  __ mov(edi, Operand(esp, 1 * kPointerSize));
+  __ AssertNotSmi(edi);
+  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(ecx);
+  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, edi will point there, otherwise to the
+  // backing store.
+  __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+  // eax = address of new object (tagged)
+  // ebx = mapped parameter count (tagged)
+  // ecx = argument count (tagged)
+  // edx = address of receiver argument
+  // edi = address of parameter map or backing store (tagged)
+  // esp[0] = mapped parameter count (tagged)
+  // esp[4] = function
+  // esp[8] = parameter count (tagged)
+  // Free two registers.
+  __ push(edx);
+  __ push(eax);
+
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  __ test(ebx, ebx);
+  __ j(zero, &skip_parameter_map);
+
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
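+  // The parameter map length is the mapped count plus 2 (context and backing
+  // store slots); the lea adds Smi::FromInt(2) to the tagged count in ebx.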
+  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
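+  // (For example, with parameter_count == 3 and two mapped parameters, the
+  // mapped slots 0 and 1 map to context indices MIN_CONTEXT_SLOTS + 2 and
+  // MIN_CONTEXT_SLOTS + 1, respectively.)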
+  Label parameters_loop, parameters_test;
+  __ push(ecx);
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ add(ebx, Operand(esp, 5 * kPointerSize));
+  __ sub(ebx, eax);
+  __ mov(ecx, isolate()->factory()->the_hole_value());
+  __ mov(edx, edi);
+  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+  // eax = loop variable (tagged)
+  // ebx = mapping index (tagged)
+  // ecx = the hole value
+  // edx = address of parameter map (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = argument count (tagged)
+  // esp[4] = address of new object (tagged)
+  // esp[8] = address of receiver argument
+  // esp[12] = mapped parameter count (tagged)
+  // esp[16] = function
+  // esp[20] = parameter count (tagged)
+  __ jmp(&parameters_test, Label::kNear);
+
+  __ bind(&parameters_loop);
+  __ sub(eax, Immediate(Smi::FromInt(1)));
+  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+  __ add(ebx, Immediate(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ test(eax, eax);
+  __ j(not_zero, &parameters_loop, Label::kNear);
+  __ pop(ecx);
+
+  __ bind(&skip_parameter_map);
+
+  // ecx = argument count (tagged)
+  // edi = address of backing store (tagged)
+  // esp[0] = address of new object (tagged)
+  // esp[4] = address of receiver argument
+  // esp[8] = mapped parameter count (tagged)
+  // esp[12] = function
+  // esp[16] = parameter count (tagged)
+  // Copy arguments header and remaining slots (if there are any).
+  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+         Immediate(isolate()->factory()->fixed_array_map()));
+  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+  Label arguments_loop, arguments_test;
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
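+  // (ebx holds the tagged count, i.e. count << 1, so subtracting it twice
+  // subtracts count * kPointerSize from the receiver address.)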
+  __ jmp(&arguments_test, Label::kNear);
+
+  __ bind(&arguments_loop);
+  __ sub(edx, Immediate(kPointerSize));
+  __ mov(eax, Operand(edx, 0));
+  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+  __ add(ebx, Immediate(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ cmp(ebx, ecx);
+  __ j(less, &arguments_loop, Label::kNear);
+
+  // Restore.
+  __ pop(eax);  // Address of arguments object.
+  __ Drop(4);
+
+  // Return.
+  __ ret(0);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ pop(eax);   // Remove saved mapped parameter count.
+  __ pop(edi);   // Pop saved function.
+  __ pop(eax);   // Remove saved parameter count.
+  __ pop(eax);   // Pop return address.
+  __ push(edi);  // Push function.
+  __ push(edx);  // Push parameters pointer.
+  __ push(ecx);  // Push parameter count.
+  __ push(eax);  // Push return address.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- edi    : function
+  //  -- esi    : context
+  //  -- ebp    : frame pointer
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ AssertFunction(edi);
+
+  // For Ignition we need to skip all possible handler/stub frames until
+  // we reach the JavaScript frame for the function (similar to what the
+  // runtime fallback implementation does). So make edx point to that
+  // JavaScript frame.
+  {
+    Label loop, loop_entry;
+    __ mov(edx, ebp);
+    __ jmp(&loop_entry, Label::kNear);
+    __ bind(&loop);
+    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+    __ bind(&loop_entry);
+    __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+    __ j(not_equal, &loop);
+  }
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &arguments_adaptor, Label::kNear);
+  {
+    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ lea(ebx,
+           Operand(edx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ jmp(&arguments_done, Label::kNear);
+  __ bind(&arguments_adaptor);
+  {
+    __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+    __ lea(ebx,
+           Operand(ebx, eax, times_half_pointer_size,
+                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+  }
+  __ bind(&arguments_done);
+
+  // ----------- S t a t e -------------
+  //  -- eax    : number of arguments (tagged)
+  //  -- ebx    : pointer to the first argument
+  //  -- esi    : context
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  // Allocate space for the strict arguments object plus the backing store.
+  Label allocate, done_allocate;
+  __ lea(ecx,
+         Operand(eax, times_half_pointer_size,
+                 JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+  __ bind(&done_allocate);
+
+  // Setup the elements array in edx.
+  __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+         isolate()->factory()->fixed_array_map());
+  __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+  {
+    Label loop, done_loop;
+    __ Move(ecx, Smi::FromInt(0));
+    __ bind(&loop);
+    __ cmp(ecx, eax);
+    __ j(equal, &done_loop, Label::kNear);
+    __ mov(edi, Operand(ebx, 0 * kPointerSize));
+    __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+                        FixedArray::kHeaderSize),
+           edi);
+    __ sub(ebx, Immediate(1 * kPointerSize));
+    __ add(ecx, Immediate(Smi::FromInt(1)));
+    __ jmp(&loop);
+    __ bind(&done_loop);
+  }
+
+  // Setup the strict arguments object in edi.
+  __ lea(edi,
+         Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+  __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
+         isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
+  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
+  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+  __ mov(eax, edi);
+  __ Ret();
+
+  // Fall back to %AllocateInNewSpace.
+  __ bind(&allocate);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(ecx);
+    __ Push(eax);
+    __ Push(ebx);
+    __ Push(ecx);
+    __ CallRuntime(Runtime::kAllocateInNewSpace);
+    __ mov(edx, eax);
+    __ Pop(ebx);
+    __ Pop(eax);
+  }
+  __ jmp(&done_allocate);
+}
 
 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   Register context_reg = esi;
@@ -5150,11 +5362,10 @@
   __ jmp(&leave_exit_frame);
 }
 
-
 static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                       const ParameterCount& argc,
                                       bool return_first_arg,
-                                      bool call_data_undefined) {
+                                      bool call_data_undefined, bool is_lazy) {
   // ----------- S t a t e -------------
   //  -- edi                 : callee
   //  -- ebx                 : call_data
@@ -5228,8 +5439,10 @@
   // push return address
   __ push(return_address);
 
-  // load context from callee
-  __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+  if (!is_lazy) {
+    // load context from callee
+    __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+  }
 
   // API function gets reference to the v8::Arguments. If CPU profiler
   // is enabled wrapper function will be called and we need to pass
@@ -5301,7 +5514,7 @@
 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
   bool call_data_undefined = this->call_data_undefined();
   CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
-                            call_data_undefined);
+                            call_data_undefined, false);
 }
 
 
@@ -5309,45 +5522,58 @@
   bool is_store = this->is_store();
   int argc = this->argc();
   bool call_data_undefined = this->call_data_undefined();
+  bool is_lazy = this->is_lazy();
   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
-                            call_data_undefined);
+                            call_data_undefined, is_lazy);
 }
 
 
 void CallApiGetterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- esp[0]                  : return address
-  //  -- esp[4]                  : name
-  //  -- esp[8 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- esp[0]                        : return address
+  //  -- esp[4]                        : name
+  //  -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
   //  -- ...
-  //  -- edx                    : api_function_address
+  //  -- edx                           : api_function_address
   // -----------------------------------
   DCHECK(edx.is(ApiGetterDescriptor::function_address()));
 
-  // array for v8::Arguments::values_, handler for name and pointer
-  // to the values (it considered as smi in GC).
-  const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
-  // Allocate space for opional callback address parameter in case
-  // CPU profiler is active.
-  const int kApiArgc = 2 + 1;
+  // v8::PropertyCallbackInfo::args_ array and name handle.
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  // Allocate v8::PropertyCallbackInfo object, arguments for callback and
+  // space for optional callback address parameter (in case CPU profiler is
+  // active) in non-GCed stack space.
+  const int kApiArgc = 3 + 1;
 
   Register api_function_address = edx;
   Register scratch = ebx;
 
-  // load address of name
-  __ lea(scratch, Operand(esp, 1 * kPointerSize));
+  // Load address of v8::PropertyCallbackInfo::args_ array.
+  __ lea(scratch, Operand(esp, 2 * kPointerSize));
 
   PrepareCallApiFunction(masm, kApiArgc);
+  // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
+  Operand info_object = ApiParameterOperand(3);
+  __ mov(info_object, scratch);
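+  // (The stack slot holding args_ serves as the v8::PropertyCallbackInfo
+  // object itself; its address is passed as the callback's info argument.)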
+
+  __ sub(scratch, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(0), scratch);  // name.
-  __ add(scratch, Immediate(kPointerSize));
+  __ lea(scratch, info_object);
   __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
+  // Reserve space for optional callback address parameter.
+  Operand thunk_last_arg = ApiParameterOperand(2);
 
   ExternalReference thunk_ref =
       ExternalReference::invoke_accessor_getter_callback(isolate());
 
+  // +3 is to skip prolog, return address and name handle.
+  Operand return_value_operand(
+      ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
-                           ApiParameterOperand(2), kStackSpace, nullptr,
-                           Operand(ebp, 7 * kPointerSize), NULL);
+                           thunk_last_arg, kStackUnwindSpace, nullptr,
+                           return_value_operand, NULL);
 }
 
 
diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h
index a6a2a13..39a4603 100644
--- a/src/x87/code-stubs-x87.h
+++ b/src/x87/code-stubs-x87.h
@@ -271,24 +271,12 @@
     // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
     // will be restored by other means so we don't bother pushing them here.
     void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
-      if (mode == kSaveFPRegs) {
-        // Save FPU state in m108byte.
-        masm->sub(esp, Immediate(108));
-        masm->fnsave(Operand(esp, 0));
-      }
+      masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
     }
 
     inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
                                            SaveFPRegsMode mode) {
-      if (mode == kSaveFPRegs) {
-        // Restore FPU state in m108byte.
-        masm->frstor(Operand(esp, 0));
-        masm->add(esp, Immediate(108));
-      }
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+      masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
     }
 
     inline Register object() { return object_; }
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index 5a1951a..3b90276 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -169,27 +169,6 @@
 }
 
 
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers ebp and esp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < X87Register::kMaxNumRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
-}
-
-
 void Deoptimizer::SetPlatformCompiledStubRegisters(
     FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
@@ -207,10 +186,8 @@
   }
 }
 
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
-  int parameter_count =
-      function->shared()->internal_formal_parameter_count() + 1;
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
+  int parameter_count = shared->internal_formal_parameter_count() + 1;
   unsigned input_frame_size = input_->GetFrameSize();
   unsigned alignment_state_offset =
       input_frame_size - parameter_count * kPointerSize -
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index bf2200c..a3f1939 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -28,32 +28,30 @@
   OperandOrder op_order_;
 };
 
-
 static const ByteMnemonic two_operands_instr[] = {
-  {0x01, "add", OPER_REG_OP_ORDER},
-  {0x03, "add", REG_OPER_OP_ORDER},
-  {0x09, "or", OPER_REG_OP_ORDER},
-  {0x0B, "or", REG_OPER_OP_ORDER},
-  {0x1B, "sbb", REG_OPER_OP_ORDER},
-  {0x21, "and", OPER_REG_OP_ORDER},
-  {0x23, "and", REG_OPER_OP_ORDER},
-  {0x29, "sub", OPER_REG_OP_ORDER},
-  {0x2A, "subb", REG_OPER_OP_ORDER},
-  {0x2B, "sub", REG_OPER_OP_ORDER},
-  {0x31, "xor", OPER_REG_OP_ORDER},
-  {0x33, "xor", REG_OPER_OP_ORDER},
-  {0x38, "cmpb", OPER_REG_OP_ORDER},
-  {0x3A, "cmpb", REG_OPER_OP_ORDER},
-  {0x3B, "cmp", REG_OPER_OP_ORDER},
-  {0x84, "test_b", REG_OPER_OP_ORDER},
-  {0x85, "test", REG_OPER_OP_ORDER},
-  {0x87, "xchg", REG_OPER_OP_ORDER},
-  {0x8A, "mov_b", REG_OPER_OP_ORDER},
-  {0x8B, "mov", REG_OPER_OP_ORDER},
-  {0x8D, "lea", REG_OPER_OP_ORDER},
-  {-1, "", UNSET_OP_ORDER}
-};
-
+    {0x01, "add", OPER_REG_OP_ORDER},
+    {0x03, "add", REG_OPER_OP_ORDER},
+    {0x09, "or", OPER_REG_OP_ORDER},
+    {0x0B, "or", REG_OPER_OP_ORDER},
+    {0x1B, "sbb", REG_OPER_OP_ORDER},
+    {0x21, "and", OPER_REG_OP_ORDER},
+    {0x23, "and", REG_OPER_OP_ORDER},
+    {0x29, "sub", OPER_REG_OP_ORDER},
+    {0x2A, "subb", REG_OPER_OP_ORDER},
+    {0x2B, "sub", REG_OPER_OP_ORDER},
+    {0x31, "xor", OPER_REG_OP_ORDER},
+    {0x33, "xor", REG_OPER_OP_ORDER},
+    {0x38, "cmpb", OPER_REG_OP_ORDER},
+    {0x39, "cmp", OPER_REG_OP_ORDER},
+    {0x3A, "cmpb", REG_OPER_OP_ORDER},
+    {0x3B, "cmp", REG_OPER_OP_ORDER},
+    {0x84, "test_b", REG_OPER_OP_ORDER},
+    {0x85, "test", REG_OPER_OP_ORDER},
+    {0x87, "xchg", REG_OPER_OP_ORDER},
+    {0x8A, "mov_b", REG_OPER_OP_ORDER},
+    {0x8B, "mov", REG_OPER_OP_ORDER},
+    {0x8D, "lea", REG_OPER_OP_ORDER},
+    {-1, "", UNSET_OP_ORDER}};
 
 static const ByteMnemonic zero_operands_instr[] = {
   {0xC3, "ret", UNSET_OP_ORDER},
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index 5bd84fc..bfed342 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -59,20 +59,6 @@
 const Register StringCompareDescriptor::RightRegister() { return eax; }
 
 
-const Register ArgumentsAccessReadDescriptor::index() { return edx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return edi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
-
-
 const Register ApiGetterDescriptor::function_address() { return edx; }
 
 
@@ -101,6 +87,29 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi, edx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {edi};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -119,6 +128,10 @@
 
 
 // static
+const Register ToNameDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
 const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
 
 
@@ -171,13 +184,6 @@
 }
 
 
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {ecx, eax};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 void CallFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edi};
@@ -413,6 +419,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+      kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -424,7 +437,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
@@ -436,7 +448,6 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
 void InterpreterCEntryDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 7a0beb5..0c459eb 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -120,29 +120,56 @@
   Push(isolate()->heap()->root_handle(index));
 }
 
+#define REG(Name) \
+  { Register::kCode_##Name }
 
-void MacroAssembler::InNewSpace(
-    Register object,
-    Register scratch,
-    Condition cc,
-    Label* condition_met,
-    Label::Distance condition_met_distance) {
-  DCHECK(cc == equal || cc == not_equal);
-  if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
-  } else {
-    mov(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
+static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1, Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
   }
-  // Check that we can use a test_b.
-  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
-  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
-  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
-           | (1 << MemoryChunk::IN_TO_SPACE);
-  // If non-zero, the page belongs to new-space.
-  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-         static_cast<uint8_t>(mask));
-  j(cc, condition_met, condition_met_distance);
+  if (fp_mode == kSaveFPRegs) {
+    // Save FPU state in m108byte.
+    sub(esp, Immediate(108));
+    fnsave(Operand(esp, 0));
+  }
+}
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+                                    Register exclusion2, Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    // Restore FPU state in m108byte.
+    frstor(Operand(esp, 0));
+    add(esp, Immediate(108));
+  }
+
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
+                                Label* condition_met,
+                                Label::Distance distance) {
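+  // A page belongs to new space iff IN_FROM_SPACE or IN_TO_SPACE is set.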
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
 }
 
 
@@ -492,6 +519,75 @@
   }
 }
 
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to update
+  // remembered set. If incremental marking is off, there is nothing for us to
+  // do.
+  if (!FLAG_incremental_marking) return;
+
+  DCHECK(!js_function.is(code_entry));
+  DCHECK(!js_function.is(scratch));
+  DCHECK(!code_entry.is(scratch));
+  AssertNotSmi(js_function);
+
+  if (emit_debug_code()) {
+    Label ok;
+    lea(scratch, FieldOperand(js_function, offset));
+    cmp(code_entry, Operand(scratch, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+                Label::kNear);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+                Label::kNear);
+
+  // Save input registers.
+  push(js_function);
+  push(code_entry);
+
+  const Register dst = scratch;
+  lea(dst, FieldOperand(js_function, offset));
+
+  // Save caller-saved registers.
+  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count, code_entry);
+  mov(Operand(esp, 0 * kPointerSize), js_function);
+  mov(Operand(esp, 1 * kPointerSize), dst);  // Slot.
+  mov(Operand(esp, 2 * kPointerSize),
+      Immediate(ExternalReference::isolate_address(isolate())));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  // Restore input registers.
+  pop(code_entry);
+  pop(js_function);
+
+  bind(&done);
+}
 
 void MacroAssembler::DebugBreak() {
   Move(eax, Immediate(0));
@@ -804,6 +900,17 @@
   }
 }
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    test(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAReceiver);
+    Push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+    Pop(object);
+    Check(above_equal, kOperandIsNotAReceiver);
+  }
+}
 
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   if (emit_debug_code()) {
@@ -936,7 +1043,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
   EnterExitFramePrologue();
 
   // Set up argc and argv in callee-saved registers.
@@ -945,7 +1052,7 @@
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
-  EnterExitFrameEpilogue(3, save_doubles);
+  EnterExitFrameEpilogue(argc, save_doubles);
 }
 
 
@@ -1734,13 +1841,13 @@
                                                 Register end_address,
                                                 Register filler) {
   Label loop, entry;
-  jmp(&entry);
+  jmp(&entry, Label::kNear);
   bind(&loop);
   mov(Operand(current_address, 0), filler);
   add(current_address, Immediate(kPointerSize));
   bind(&entry);
   cmp(current_address, end_address);
-  j(below, &loop);
+  j(below, &loop, Label::kNear);
 }
 
 
@@ -1762,9 +1869,9 @@
                                       Label* then_label) {
   Label ok;
   test(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   test(op, op);
-  j(sign, then_label);
+  j(sign, then_label, Label::kNear);
   bind(&ok);
 }
 
@@ -1776,10 +1883,10 @@
                                       Label* then_label) {
   Label ok;
   test(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   mov(scratch, op1);
   or_(scratch, op2);
-  j(sign, then_label);
+  j(sign, then_label, Label::kNear);
   bind(&ok);
 }
 
@@ -2009,7 +2116,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2111,26 +2218,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  GetBuiltinFunction(edi, native_context_index);
-  InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
-                                        int native_context_index) {
-  // Load the JavaScript builtin function from the builtins object.
-  mov(target, NativeContextOperand());
-  mov(target, ContextOperand(target, native_context_index));
-}
-
-
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -2524,9 +2611,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // will not return here
   int3();
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 9b6c5e8..fc49361 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -16,6 +16,7 @@
 // Give alias names to registers for calling conventions.
 const Register kReturnRegister0 = {Register::kCode_eax};
 const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kReturnRegister2 = {Register::kCode_edi};
 const Register kJSFunctionRegister = {Register::kCode_edi};
 const Register kContextRegister = {Register::kCode_esi};
 const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
@@ -106,6 +107,16 @@
     j(not_equal, if_not_equal, if_not_equal_distance);
   }
 
+  // These functions do not arrange the registers in any particular order so
+  // they are not useful for calls that can cause a GC.  The caller can
+  // exclude up to 3 registers that do not need to be saved and restored.
+  void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+                       Register exclusion2 = no_reg,
+                       Register exclusion3 = no_reg);
+  void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+                      Register exclusion2 = no_reg,
+                      Register exclusion3 = no_reg);
+
   // ---------------------------------------------------------------------------
   // GC Support
   enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
@@ -206,6 +217,11 @@
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting);
 
+  // Notify the garbage collector that we wrote a code entry into a
+  // JSFunction. Only scratch is clobbered by the operation.
+  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+                                 Register scratch);
+
   // For page containing |object| mark the region covering the object's map
   // dirty. |object| is the object being stored into, |map| is the Map object
   // that was stored.
@@ -225,7 +241,7 @@
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
   // esi.
-  void EnterExitFrame(bool save_doubles);
+  void EnterExitFrame(int argc, bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
@@ -270,6 +286,9 @@
   void StoreToSafepointRegisterSlot(Register dst, Immediate src);
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Nop, because x87 does not have a root register.
+  void InitializeRootRegister() {}
+
   void LoadHeapObject(Register result, Handle<HeapObject> object);
   void CmpHeapObject(Register reg, Handle<HeapObject> object);
   void PushHeapObject(Handle<HeapObject> object);
@@ -325,13 +344,6 @@
                       const ParameterCount& actual, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  // Invoke specified builtin JavaScript function.
-  void InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
-
-  // Store the function for the given builtin in the target register.
-  void GetBuiltinFunction(Register target, int native_context_index);
-
 
   // Expression support
   // Support for constant splitting.
@@ -517,6 +529,9 @@
   // enabled via --debug-code.
   void AssertBoundFunction(Register object);
 
+  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+  void AssertReceiver(Register object);
+
   // Abort execution if argument is not undefined or an AllocationSite, enabled
   // via --debug-code.
   void AssertUndefinedOrAllocationSite(Register object);
diff --git a/src/zone.cc b/src/zone.cc
index 9dcebba..1f722f2 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -105,7 +105,10 @@
   Address result = position_;
 
   const size_t size_with_redzone = size + kASanRedzoneBytes;
-  if (limit_ < position_ + size_with_redzone) {
+  const uintptr_t limit = reinterpret_cast<uintptr_t>(limit_);
+  const uintptr_t position = reinterpret_cast<uintptr_t>(position_);
+  // position_ > limit_ can be true after the alignment correction above.
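+  // Comparing against the remaining capacity avoids the overflow that
+  // position_ + size_with_redzone could cause.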
+  if (limit < position || size_with_redzone > limit - position) {
     result = NewExpand(size_with_redzone);
   } else {
     position_ += size_with_redzone;
@@ -222,7 +225,10 @@
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
   DCHECK_EQ(size, RoundDown(size, kAlignment));
-  DCHECK_LT(limit_, position_ + size);
+  DCHECK(limit_ < position_ ||
+         reinterpret_cast<uintptr_t>(limit_) -
+                 reinterpret_cast<uintptr_t>(position_) <
+             size);
 
   // Compute the new segment size. We use a 'high water mark'
   // strategy, where we increase the segment size every time we expand